Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>

parent dfa93414c5
commit 52baf68d50

3756 changed files with 113013 additions and 92675 deletions
vendor/k8s.io/kubernetes/pkg/kubelet/BUILD (generated, vendored): 18 changes

@@ -45,10 +45,10 @@ go_library(
"//pkg/capabilities:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/legacylisters:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/fieldpath:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubelet/api:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/cm:go_default_library",

@@ -72,6 +72,7 @@ go_library(
"//pkg/kubelet/qos:go_default_library",
"//pkg/kubelet/remote:go_default_library",
"//pkg/kubelet/rkt:go_default_library",
"//pkg/kubelet/secret:go_default_library",
"//pkg/kubelet/server:go_default_library",
"//pkg/kubelet/server/remotecommand:go_default_library",
"//pkg/kubelet/server/stats:go_default_library",

@@ -87,12 +88,9 @@ go_library(
"//pkg/securitycontext:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/config:go_default_library",
"//pkg/util/dbus:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/mount:go_default_library",

@@ -115,6 +113,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/conversion",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",

@@ -125,6 +124,9 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/validation/field",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/kubernetes",
"//vendor:k8s.io/client-go/util/clock",
"//vendor:k8s.io/client-go/util/flowcontrol",
"//vendor:k8s.io/client-go/util/integer",
],
)

@@ -173,6 +175,7 @@ go_test(
"//pkg/kubelet/pod/testing:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/prober/testing:go_default_library",
"//pkg/kubelet/secret:go_default_library",
"//pkg/kubelet/server/remotecommand:go_default_library",
"//pkg/kubelet/server/stats:go_default_library",
"//pkg/kubelet/status:go_default_library",

@@ -181,11 +184,8 @@ go_test(
"//pkg/kubelet/util/sliceutils:go_default_library",
"//pkg/kubelet/volumemanager:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strategicpatch:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",

@@ -206,6 +206,9 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/util/clock",
"//vendor:k8s.io/client-go/util/flowcontrol",
"//vendor:k8s.io/client-go/util/testing",
],
)

@@ -245,6 +248,7 @@ filegroup(
"//pkg/kubelet/remote:all-srcs",
"//pkg/kubelet/rkt:all-srcs",
"//pkg/kubelet/rktshim:all-srcs",
"//pkg/kubelet/secret:all-srcs",
"//pkg/kubelet/server:all-srcs",
"//pkg/kubelet/status:all-srcs",
"//pkg/kubelet/sysctl:all-srcs",
vendor/k8s.io/kubernetes/pkg/kubelet/OWNERS (generated, vendored): 13 changes

@@ -1,4 +1,9 @@
assignees:
- dchen1107
- vishh
- yujuhong
approvers:
- Random-Liu
- dchen1107
- derekwaynecarr
- timstclair
- vishh
- yujuhong
reviewers:
- sig-node-reviewers
vendor/k8s.io/kubernetes/pkg/kubelet/active_deadline.go (generated, vendored): 2 changes

@@ -20,11 +20,11 @@ import (
"fmt"
"time"

"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/util/clock"
)

const (
vendor/k8s.io/kubernetes/pkg/kubelet/active_deadline_test.go (generated, vendored): 2 changes

@@ -22,9 +22,9 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/clock"
)

// mockPodStatusProvider returns the status on the specified pod
vendor/k8s.io/kubernetes/pkg/kubelet/api/testing/fake_image_service.go (generated, vendored): 14 changes

@@ -57,8 +57,8 @@ func NewFakeImageService() *FakeImageService {

func (r *FakeImageService) makeFakeImage(image string) *runtimeapi.Image {
return &runtimeapi.Image{
Id: &image,
Size_: &r.FakeImageSize,
Id: image,
Size_: r.FakeImageSize,
RepoTags: []string{image},
}
}

@@ -72,7 +72,7 @@ func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtim
images := make([]*runtimeapi.Image, 0)
for _, img := range r.Images {
if filter != nil && filter.Image != nil {
if !sliceutils.StringInSlice(filter.Image.GetImage(), img.RepoTags) {
if !sliceutils.StringInSlice(filter.Image.Image, img.RepoTags) {
continue
}
}

@@ -88,7 +88,7 @@ func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi

r.Called = append(r.Called, "ImageStatus")

return r.Images[image.GetImage()], nil
return r.Images[image.Image], nil
}

func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) (string, error) {

@@ -99,9 +99,9 @@ func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimea

// ImageID should be randomized for real container runtime, but here just use
// image's name for easily making fake images.
imageID := image.GetImage()
imageID := image.Image
if _, ok := r.Images[imageID]; !ok {
r.Images[imageID] = r.makeFakeImage(image.GetImage())
r.Images[imageID] = r.makeFakeImage(image.Image)
}

return imageID, nil

@@ -114,7 +114,7 @@ func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error {
r.Called = append(r.Called, "RemoveImage")

// Remove the image
delete(r.Images, image.GetImage())
delete(r.Images, image.Image)

return nil
}
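The change running through this file, `image.GetImage()` becoming `image.Image` and `&image` becoming `image`, follows from the proto2 to proto3 move in api.proto later in this commit: proto3 scalar fields are generated as plain Go values rather than pointers with getters. A minimal illustrative sketch with local stand-in types (not the real runtimeapi structs):

```go
package main

import "fmt"

// Proto2-style generated struct: scalar fields are pointers, so callers read
// through a nil-safe getter and take addresses when building values.
type imageSpecV2 struct {
	Image *string
}

func (s *imageSpecV2) GetImage() string {
	if s == nil || s.Image == nil {
		return ""
	}
	return *s.Image
}

// Proto3-style generated struct: scalar fields are plain values and can be
// read and written directly, which is what the vendored fakes switch to.
type imageSpecV3 struct {
	Image string
}

func main() {
	name := "busybox:latest"

	old := &imageSpecV2{Image: &name} // proto2: take the address
	cur := imageSpecV3{Image: name}   // proto3: assign the value

	fmt.Println(old.GetImage()) // proto2: read through the getter
	fmt.Println(cur.Image)      // proto3: read the field directly
}
```

Message-typed fields (such as `ImageSpec` values passed around as `*runtimeapi.ImageSpec`) keep their pointer indirection in the generated code; only scalar fields lose it.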
vendor/k8s.io/kubernetes/pkg/kubelet/api/testing/fake_runtime_service.go (generated, vendored): 62 changes

@@ -61,7 +61,7 @@ func (r *FakeRuntimeService) SetFakeSandboxes(sandboxes []*FakePodSandbox) {

r.Sandboxes = make(map[string]*FakePodSandbox)
for _, sandbox := range sandboxes {
sandboxID := sandbox.GetId()
sandboxID := sandbox.Id
r.Sandboxes[sandboxID] = sandbox
}
}

@@ -72,7 +72,7 @@ func (r *FakeRuntimeService) SetFakeContainers(containers []*FakeContainer) {

r.Containers = make(map[string]*FakeContainer)
for _, c := range containers {
containerID := c.GetId()
containerID := c.Id
r.Containers[containerID] = c
}

@@ -103,10 +103,10 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResp
r.Called = append(r.Called, "Version")

return &runtimeapi.VersionResponse{
Version: &version,
RuntimeName: &FakeRuntimeName,
RuntimeVersion: &version,
RuntimeApiVersion: &version,
Version: version,
RuntimeName: FakeRuntimeName,
RuntimeVersion: version,
RuntimeApiVersion: version,
}, nil
}

@@ -128,16 +128,15 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig)
// PodSandboxID should be randomized for real container runtime, but here just use
// fixed name from BuildSandboxName() for easily making fake sandboxes.
podSandboxID := BuildSandboxName(config.Metadata)
createdAt := time.Now().Unix()
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
createdAt := time.Now().UnixNano()
r.Sandboxes[podSandboxID] = &FakePodSandbox{
PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: &podSandboxID,
Id: podSandboxID,
Metadata: config.Metadata,
State: &readyState,
CreatedAt: &createdAt,
State: runtimeapi.PodSandboxState_SANDBOX_READY,
CreatedAt: createdAt,
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: &FakePodSandboxIP,
Ip: FakePodSandboxIP,
},
Labels: config.Labels,
Annotations: config.Annotations,

@@ -153,9 +152,8 @@ func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error {

r.Called = append(r.Called, "StopPodSandbox")

notReadyState := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if s, ok := r.Sandboxes[podSandboxID]; ok {
s.State = &notReadyState
s.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
} else {
return fmt.Errorf("pod sandbox %s not found", podSandboxID)
}

@@ -199,10 +197,10 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter)
result := make([]*runtimeapi.PodSandbox, 0)
for id, s := range r.Sandboxes {
if filter != nil {
if filter.Id != nil && filter.GetId() != id {
if filter.Id != "" && filter.Id != id {
continue
}
if filter.State != nil && filter.GetState() != s.GetState() {
if filter.State != nil && filter.GetState().State != s.State {
continue
}
if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) {

@@ -240,17 +238,17 @@ func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtim
// ContainerID should be randomized for real container runtime, but here just use
// fixed BuildContainerName() for easily making fake containers.
containerID := BuildContainerName(config.Metadata, podSandboxID)
createdAt := time.Now().Unix()
createdAt := time.Now().UnixNano()
createdState := runtimeapi.ContainerState_CONTAINER_CREATED
imageRef := config.Image.GetImage()
imageRef := config.Image.Image
r.Containers[containerID] = &FakeContainer{
ContainerStatus: runtimeapi.ContainerStatus{
Id: &containerID,
Id: containerID,
Metadata: config.Metadata,
Image: config.Image,
ImageRef: &imageRef,
CreatedAt: &createdAt,
State: &createdState,
ImageRef: imageRef,
CreatedAt: createdAt,
State: createdState,
Labels: config.Labels,
Annotations: config.Annotations,
},

@@ -272,10 +270,8 @@ func (r *FakeRuntimeService) StartContainer(containerID string) error {
}

// Set container to running.
startedAt := time.Now().Unix()
runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
c.State = &runningState
c.StartedAt = &startedAt
c.State = runtimeapi.ContainerState_CONTAINER_RUNNING
c.StartedAt = time.Now().UnixNano()

return nil
}

@@ -292,10 +288,10 @@ func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) er
}

// Set container to exited state.
finishedAt := time.Now().Unix()
finishedAt := time.Now().UnixNano()
exitedState := runtimeapi.ContainerState_CONTAINER_EXITED
c.State = &exitedState
c.FinishedAt = &finishedAt
c.State = exitedState
c.FinishedAt = finishedAt

return nil
}

@@ -321,13 +317,13 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter)
result := make([]*runtimeapi.Container, 0)
for _, s := range r.Containers {
if filter != nil {
if filter.Id != nil && filter.GetId() != s.GetId() {
if filter.Id != "" && filter.Id != s.Id {
continue
}
if filter.PodSandboxId != nil && filter.GetPodSandboxId() != s.SandboxID {
if filter.PodSandboxId != "" && filter.PodSandboxId != s.SandboxID {
continue
}
if filter.State != nil && filter.GetState() != s.GetState() {
if filter.State != nil && filter.GetState().State != s.State {
continue
}
if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) {

@@ -338,7 +334,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter)
result = append(result, &runtimeapi.Container{
Id: s.Id,
CreatedAt: s.CreatedAt,
PodSandboxId: &s.SandboxID,
PodSandboxId: s.SandboxID,
Metadata: s.Metadata,
State: s.State,
Image: s.Image,
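The filter checks above stop comparing enum pointers (`filter.GetState() != s.GetState()`) and instead go through a wrapper (`filter.GetState().State != s.State`). With proto3, a bare enum field cannot distinguish "unset" from its zero value (`SANDBOX_READY`), so the API wraps the enum in a message and treats a nil wrapper as "no state filter". A small sketch of that pattern, using hypothetical local types rather than the generated ones:

```go
package main

import "fmt"

// Stand-in for the proto3 PodSandboxState enum; the zero value is a real state.
type podSandboxState int32

const (
	sandboxReady    podSandboxState = 0
	sandboxNotReady podSandboxState = 1
)

// Stand-in for the PodSandboxStateValue wrapper message: a nil *stateValue
// means "no state filter", which a bare proto3 enum field cannot express
// because its zero value collides with SANDBOX_READY.
type stateValue struct {
	State podSandboxState
}

type sandboxFilter struct {
	Id    string
	State *stateValue
}

func matches(f *sandboxFilter, id string, state podSandboxState) bool {
	if f == nil {
		return true
	}
	if f.Id != "" && f.Id != id { // proto3 string: empty means "no ID filter"
		return false
	}
	if f.State != nil && f.State.State != state {
		return false
	}
	return true
}

func main() {
	// Filter only by state; the empty Id matches every sandbox.
	f := &sandboxFilter{State: &stateValue{State: sandboxReady}}
	fmt.Println(matches(f, "sandbox-1", sandboxReady))    // true
	fmt.Println(matches(f, "sandbox-2", sandboxNotReady)) // false
}
```

The same reasoning explains why string filters switch from `filter.Id != nil` to `filter.Id != ""`: in proto3 the empty string stands in for "unset".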
vendor/k8s.io/kubernetes/pkg/kubelet/api/testing/utils.go (generated, vendored): 4 changes

@@ -24,11 +24,11 @@ import (

func BuildContainerName(metadata *runtimeapi.ContainerMetadata, sandboxID string) string {
// include the sandbox ID to make the container ID unique.
return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.GetName(), metadata.GetAttempt())
return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.Name, metadata.Attempt)
}

func BuildSandboxName(metadata *runtimeapi.PodSandboxMetadata) string {
return fmt.Sprintf("%s_%s_%s_%d", metadata.GetName(), metadata.GetNamespace(), metadata.GetUid(), metadata.GetAttempt())
return fmt.Sprintf("%s_%s_%s_%d", metadata.Name, metadata.Namespace, metadata.Uid, metadata.Attempt)
}

func filterInLabels(filter, labels map[string]string) bool {
vendor/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime/BUILD (generated, vendored): 2 changes

@@ -15,7 +15,9 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//vendor:github.com/gogo/protobuf/gogoproto",
"//vendor:github.com/gogo/protobuf/proto",
"//vendor:github.com/gogo/protobuf/sortkeys",
"//vendor:golang.org/x/net/context",
"//vendor:google.golang.org/grpc",
],
vendor/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.pb.go (generated, vendored): 18912 changes

File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.proto (generated, vendored): 399 changes

@@ -1,8 +1,18 @@
// To regenerate api.pb.go run hack/update-generated-runtime.sh
syntax = 'proto2';
syntax = 'proto3';

package runtime;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.goproto_getters_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_unrecognized_all) = false;

// Runtime service defines the public APIs for remote container runtimes
service RuntimeService {
// Version returns the runtime name, runtime version, and runtime API version.

@@ -84,20 +94,20 @@ service ImageService {

message VersionRequest {
// Version of the kubelet runtime API.
optional string version = 1;
string version = 1;
}

message VersionResponse {
// Version of the kubelet runtime API.
optional string version = 1;
string version = 1;
// Name of the container runtime.
optional string runtime_name = 2;
string runtime_name = 2;
// Version of the container runtime. The string must be
// semver-compatible.
optional string runtime_version = 3;
string runtime_version = 3;
// API version of the container runtime. The string must be
// semver-compatible.
optional string runtime_api_version = 4;
string runtime_api_version = 4;
}

// DNSConfig specifies the DNS servers and search domains of a sandbox.

@@ -119,35 +129,41 @@ enum Protocol {
// PortMapping specifies the port mapping configurations of a sandbox.
message PortMapping {
// Protocol of the port mapping.
optional Protocol protocol = 1;
// Port number within the container.
optional int32 container_port = 2;
// Port number on the host.
optional int32 host_port = 3;
Protocol protocol = 1;
// Port number within the container. Default: 0 (not specified).
int32 container_port = 2;
// Port number on the host. Default: 0 (not specified).
int32 host_port = 3;
// Host IP.
optional string host_ip = 4;
string host_ip = 4;
}

// Mount specifies a host volume to mount into a container.
message Mount {
// Path of the mount within the container.
optional string container_path = 1;
string container_path = 1;
// Path of the mount on the host.
optional string host_path = 2;
string host_path = 2;
// If set, the mount is read-only.
optional bool readonly = 3;
bool readonly = 3;
// If set, the mount needs SELinux relabeling.
optional bool selinux_relabel = 4;
bool selinux_relabel = 4;
}

// NamespaceOption provides options for Linux namespaces.
message NamespaceOption {
// If set, use the host's network namespace.
optional bool host_network = 1;
bool host_network = 1;
// If set, use the host's PID namespace.
optional bool host_pid = 2;
bool host_pid = 2;
// If set, use the host's IPC namespace.
optional bool host_ipc = 3;
bool host_ipc = 3;
}

// Int64Value is the wrapper of int64.
message Int64Value {
// The value.
int64 value = 1;
}

// LinuxSandboxSecurityContext holds linux security configuration that will be

@@ -158,13 +174,13 @@ message NamespaceOption {
message LinuxSandboxSecurityContext {
// Configurations for the sandbox's namespaces.
// This will be used only if the PodSandbox uses namespace for isolation.
optional NamespaceOption namespace_options = 1;
NamespaceOption namespace_options = 1;
// Optional SELinux context to be applied.
optional SELinuxOption selinux_options = 2;
SELinuxOption selinux_options = 2;
// UID to run sandbox processes as, when applicable.
optional int64 run_as_user = 3;
Int64Value run_as_user = 3;
// If set, the root filesystem of the sandbox is read-only.
optional bool readonly_rootfs = 4;
bool readonly_rootfs = 4;
// List of groups applied to the first process run in the sandbox, in
// addition to the sandbox's primary GID.
repeated int64 supplemental_groups = 5;

@@ -173,7 +189,7 @@ message LinuxSandboxSecurityContext {
// MUST be true.
// This allows a sandbox to take additional security precautions if no
// privileged containers are expected to be run.
optional bool privileged = 6;
bool privileged = 6;
}

// LinuxPodSandboxConfig holds platform-specific configurations for Linux

@@ -182,9 +198,9 @@ message LinuxPodSandboxConfig {
// Parent cgroup of the PodSandbox.
// The cgroupfs style syntax will be used, but the container runtime can
// convert it to systemd semantics if needed.
optional string cgroup_parent = 1;
string cgroup_parent = 1;
// LinuxSandboxSecurityContext holds sandbox security attributes.
optional LinuxSandboxSecurityContext security_context = 2;
LinuxSandboxSecurityContext security_context = 2;
}

// PodSandboxMetadata holds all necessary information for building the sandbox name.

@@ -193,13 +209,13 @@ message LinuxPodSandboxConfig {
// the runtime can construct a unique PodSandboxName based on the metadata.
message PodSandboxMetadata {
// Pod name of the sandbox. Same as the pod name in the PodSpec.
optional string name = 1;
string name = 1;
// Pod UID of the sandbox. Same as the pod UID in the PodSpec.
optional string uid = 2;
string uid = 2;
// Pod namespace of the sandbox. Same as the pod namespace in the PodSpec.
optional string namespace = 3;
// Attempt number of creating the sandbox.
optional uint32 attempt = 4;
string namespace = 3;
// Attempt number of creating the sandbox. Default: 0.
uint32 attempt = 4;
}

// PodSandboxConfig holds all the required and optional fields for creating a

@@ -209,9 +225,9 @@ message PodSandboxConfig {
// sandbox, and the runtime should leverage this to ensure correct
// operation. The runtime may also use this information to improve UX, such
// as by constructing a readable name.
optional PodSandboxMetadata metadata = 1;
PodSandboxMetadata metadata = 1;
// Hostname of the sandbox.
optional string hostname = 2;
string hostname = 2;
// Path to the directory on the host in which container log files are
// stored.
// By default the log of a container going into the LogDirectory will be

@@ -227,9 +243,9 @@ message PodSandboxConfig {
// container logs are under active discussion in
// https://issues.k8s.io/24677. There *may* be future change of direction
// for logging as the discussion carries on.
optional string log_directory = 3;
string log_directory = 3;
// DNS config for the sandbox.
optional DNSConfig dns_config = 4;
DNSConfig dns_config = 4;
// Port mappings for the sandbox.
repeated PortMapping port_mappings = 5;
// Key-value pairs that may be used to scope and select individual resources.

@@ -292,77 +308,77 @@ message PodSandboxConfig {
//
map<string, string> annotations = 7;
// Optional configurations specific to Linux hosts.
optional LinuxPodSandboxConfig linux = 8;
LinuxPodSandboxConfig linux = 8;
}

message RunPodSandboxRequest {
// Configuration for creating a PodSandbox.
optional PodSandboxConfig config = 1;
PodSandboxConfig config = 1;
}

message RunPodSandboxResponse {
// ID of the PodSandbox to run.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
}

message StopPodSandboxRequest {
// ID of the PodSandbox to stop.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
}

message StopPodSandboxResponse {}

message RemovePodSandboxRequest {
// ID of the PodSandbox to remove.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
}

message RemovePodSandboxResponse {}

message PodSandboxStatusRequest {
// ID of the PodSandbox for which to retrieve status.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
}

// PodSandboxNetworkStatus is the status of the network for a PodSandbox.
message PodSandboxNetworkStatus {
// IP address of the PodSandbox.
optional string ip = 1;
string ip = 1;
}

// Namespace contains paths to the namespaces.
message Namespace {
// Path to the network namespace.
optional string network = 1;
string network = 1;
// Namespace options for Linux namespaces.
optional NamespaceOption options = 2;
NamespaceOption options = 2;
}

// LinuxSandboxStatus contains status specific to Linux sandboxes.
message LinuxPodSandboxStatus {
// Paths to the sandbox's namespaces.
optional Namespace namespaces = 1;
Namespace namespaces = 1;
}

enum PodSandboxState {
SANDBOX_READY = 0;
SANDBOX_READY = 0;
SANDBOX_NOTREADY = 1;
}

// PodSandboxStatus contains the status of the PodSandbox.
message PodSandboxStatus {
// ID of the sandbox.
optional string id = 1;
string id = 1;
// Metadata of the sandbox.
optional PodSandboxMetadata metadata = 2;
PodSandboxMetadata metadata = 2;
// State of the sandbox.
optional PodSandboxState state = 3;
// Creation timestamp of the sandbox in nanoseconds.
optional int64 created_at = 4;
PodSandboxState state = 3;
// Creation timestamp of the sandbox in nanoseconds. Must be > 0.
int64 created_at = 4;
// Network contains network status if network is handled by the runtime.
optional PodSandboxNetworkStatus network = 5;
PodSandboxNetworkStatus network = 5;
// Linux-specific status to a pod sandbox.
optional LinuxPodSandboxStatus linux = 6;
LinuxPodSandboxStatus linux = 6;
// Labels are key-value pairs that may be used to scope and select individual resources.
map<string, string> labels = 7;
// Unstructured key-value map holding arbitrary metadata.
@@ -374,16 +390,22 @@ message PodSandboxStatus {

message PodSandboxStatusResponse {
// Status of the PodSandbox.
optional PodSandboxStatus status = 1;
PodSandboxStatus status = 1;
}

// PodSandboxStateValue is the wrapper of PodSandboxState.
message PodSandboxStateValue {
// State of the sandbox.
PodSandboxState state = 1;
}

// PodSandboxFilter is used to filter a list of PodSandboxes.
// All those fields are combined with 'AND'
message PodSandboxFilter {
// ID of the sandbox.
optional string id = 1;
string id = 1;
// State of the sandbox.
optional PodSandboxState state = 2;
PodSandboxStateValue state = 2;
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.

@@ -392,20 +414,20 @@ message PodSandboxFilter {

message ListPodSandboxRequest {
// PodSandboxFilter to filter a list of PodSandboxes.
optional PodSandboxFilter filter = 1;
PodSandboxFilter filter = 1;
}

// PodSandbox contains minimal information about a sandbox.
message PodSandbox {
// ID of the PodSandbox.
optional string id = 1;
string id = 1;
// Metadata of the PodSandbox.
optional PodSandboxMetadata metadata = 2;
PodSandboxMetadata metadata = 2;
// State of the PodSandbox.
optional PodSandboxState state = 3;
// Creation timestamps of the PodSandbox in nanoseconds.
optional int64 created_at = 4;
PodSandboxState state = 3;
// Creation timestamps of the PodSandbox in nanoseconds. Must be > 0.
int64 created_at = 4;
// Labels of the PodSandbox.
map<string, string> labels = 5;
// Unstructured key-value map holding arbitrary metadata.

@@ -424,12 +446,12 @@ message ListPodSandboxResponse {
// value of a Container's Image field (e.g. imageID or imageDigest), but in the
// future it will include more detailed information about the different image types.
message ImageSpec {
optional string image = 1;
string image = 1;
}

message KeyValue {
optional string key = 1;
optional string value = 2;
string key = 1;
string value = 2;
}

// LinuxContainerResources specifies Linux specific configuration for

@@ -437,24 +459,24 @@ message KeyValue {
// TODO: Consider using Resources from opencontainers/runtime-spec/specs-go
// directly.
message LinuxContainerResources {
// CPU CFS (Completely Fair Scheduler) period.
optional int64 cpu_period = 1;
// CPU CFS (Completely Fair Scheduler) quota.
optional int64 cpu_quota = 2;
// CPU shares (relative weight vs. other containers).
optional int64 cpu_shares = 3;
// Memory limit in bytes.
optional int64 memory_limit_in_bytes = 4;
// OOMScoreAdj adjusts the oom-killer score.
optional int64 oom_score_adj = 5;
// CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified).
int64 cpu_period = 1;
// CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified).
int64 cpu_quota = 2;
// CPU shares (relative weight vs. other containers). Default: 0 (not specified).
int64 cpu_shares = 3;
// Memory limit in bytes. Default: 0 (not specified).
int64 memory_limit_in_bytes = 4;
// OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified).
int64 oom_score_adj = 5;
}

// SELinuxOption are the labels to be applied to the container.
message SELinuxOption {
optional string user = 1;
optional string role = 2;
optional string type = 3;
optional string level = 4;
string user = 1;
string role = 2;
string type = 3;
string level = 4;
}

// Capability contains the container capabilities to add or drop

@@ -468,7 +490,7 @@ message Capability {
// LinuxContainerSecurityContext holds linux security configuration that will be applied to a container.
message LinuxContainerSecurityContext {
// Capabilities to add or drop.
optional Capability capabilities = 1;
Capability capabilities = 1;
// If set, run container in privileged mode.
// Privileged mode is incompatible with the following options. If
// privileged is set, the following features MAY have no effect:

@@ -486,21 +508,21 @@ message LinuxContainerSecurityContext {
// 6. The device cgroup does not restrict access to any devices.
// 7. All devices from the host's /dev are available within the container.
// 8. SELinux restrictions are not applied (e.g. label=disabled).
optional bool privileged = 2;
bool privileged = 2;
// Configurations for the container's namespaces.
// Only used if the container uses namespace for isolation.
optional NamespaceOption namespace_options = 3;
NamespaceOption namespace_options = 3;
// SELinux context to be optionally applied.
optional SELinuxOption selinux_options = 4;
SELinuxOption selinux_options = 4;
// UID to run the container process as. Only one of run_as_user and
// run_as_username can be specified at a time.
optional int64 run_as_user = 5;
Int64Value run_as_user = 5;
// User name to run the container process as. If specified, the user MUST
// exist in the container image (i.e. in the /etc/passwd inside the image),
// and be resolved there by the runtime; otherwise, the runtime MUST error.
optional string run_as_username = 6;
string run_as_username = 6;
// If set, the root filesystem of the container is read-only.
optional bool readonly_rootfs = 7;
bool readonly_rootfs = 7;
// List of groups applied to the first process run in the container, in
// addition to the container's primary GID.
repeated int64 supplemental_groups = 8;

@@ -510,9 +532,9 @@ message LinuxContainerSecurityContext {
// Linux-based containers.
message LinuxContainerConfig {
// Resources specification for the container.
optional LinuxContainerResources resources = 1;
LinuxContainerResources resources = 1;
// LinuxContainerSecurityContext configuration for the container.
optional LinuxContainerSecurityContext security_context = 2;
LinuxContainerSecurityContext security_context = 2;
}

// ContainerMetadata holds all necessary information for building the container

@@ -522,22 +544,22 @@ message LinuxContainerConfig {
// within a sandbox for the entire lifetime of the sandbox.
message ContainerMetadata {
// Name of the container. Same as the container name in the PodSpec.
optional string name = 1;
// Attempt number of creating the container.
optional uint32 attempt = 2;
string name = 1;
// Attempt number of creating the container. Default: 0.
uint32 attempt = 2;
}

// Device specifies a host device to mount into a container.
message Device {
// Path of the device within the container.
optional string container_path = 1;
string container_path = 1;
// Path of the device on the host.
optional string host_path = 2;
string host_path = 2;
// Cgroups permissions of the device, candidates are one or more of
// * r - allows container to read from the specified device.
// * w - allows container to write to the specified device.
// * m - allows container to create device files that do not yet exist.
optional string permissions = 3;
string permissions = 3;
}

// ContainerConfig holds all the required and optional fields for creating a

@@ -547,15 +569,15 @@ message ContainerConfig {
// container, and the runtime should leverage this to ensure correct
// operation. The runtime may also use this information to improve UX, such
// as by constructing a readable name.
optional ContainerMetadata metadata = 1 ;
ContainerMetadata metadata = 1 ;
// Image to use.
optional ImageSpec image = 2;
ImageSpec image = 2;
// Command to execute (i.e., entrypoint for docker)
repeated string command = 3;
// Args for the Command (i.e., command for docker)
repeated string args = 4;
// Current working directory of the command.
optional string working_dir = 5;
string working_dir = 5;
// List of environment variable to set in the container.
repeated KeyValue envs = 6;
// Mounts for the container.

@@ -590,56 +612,57 @@ message ContainerConfig {
// container logs are under active discussion in
// https://issues.k8s.io/24677. There *may* be future change of direction
// for logging as the discussion carries on.
optional string log_path = 11;
string log_path = 11;

// Variables for interactive containers, these have very specialized
// use-cases (e.g. debugging).
// TODO: Determine if we need to continue supporting these fields that are
// part of Kubernetes's Container Spec.
optional bool stdin = 12;
optional bool stdin_once = 13;
optional bool tty = 14;
bool stdin = 12;
bool stdin_once = 13;
bool tty = 14;

// Configuration specific to Linux containers.
optional LinuxContainerConfig linux = 15;
LinuxContainerConfig linux = 15;
}

message CreateContainerRequest {
// ID of the PodSandbox in which the container should be created.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
// Config of the container.
optional ContainerConfig config = 2;
ContainerConfig config = 2;
// Config of the PodSandbox. This is the same config that was passed
// to RunPodSandboxRequest to create the PodSandbox. It is passed again
// here just for easy reference. The PodSandboxConfig is immutable and
// remains the same throughout the lifetime of the pod.
optional PodSandboxConfig sandbox_config = 3;
PodSandboxConfig sandbox_config = 3;
}

message CreateContainerResponse {
// ID of the created container.
optional string container_id = 1;
string container_id = 1;
}

message StartContainerRequest {
// ID of the container to start.
optional string container_id = 1;
string container_id = 1;
}

message StartContainerResponse {}

message StopContainerRequest {
// ID of the container to stop.
optional string container_id = 1;
// Timeout, in seconds, to stop the container.
optional int64 timeout = 2;
string container_id = 1;
// Timeout in seconds to wait for the container to stop before forcibly
// terminating it. Default: 0 (forcibly terminate the container immediately)
int64 timeout = 2;
}

message StopContainerResponse {}

message RemoveContainerRequest {
// ID of the container to remove.
optional string container_id = 1;
string container_id = 1;
}

message RemoveContainerResponse {}
@@ -651,15 +674,21 @@ enum ContainerState {
CONTAINER_UNKNOWN = 3;
}

// ContainerStateValue is the wrapper of ContainerState.
message ContainerStateValue {
// State of the container.
ContainerState state = 1;
}

// ContainerFilter is used to filter containers.
// All those fields are combined with 'AND'
message ContainerFilter {
// ID of the container.
optional string id = 1;
string id = 1;
// State of the container.
optional ContainerState state = 2;
ContainerStateValue state = 2;
// ID of the PodSandbox.
optional string pod_sandbox_id = 3;
string pod_sandbox_id = 3;
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.

@@ -667,7 +696,7 @@ message ContainerFilter {
}

message ListContainersRequest {
optional ContainerFilter filter = 1;
ContainerFilter filter = 1;
}

// Container provides the runtime information for a container, such as ID, hash,

@@ -675,20 +704,20 @@ message ListContainersRequest {
message Container {
// ID of the container, used by the container runtime to identify
// a container.
optional string id = 1;
string id = 1;
// ID of the sandbox to which this container belongs.
optional string pod_sandbox_id = 2;
string pod_sandbox_id = 2;
// Metadata of the container.
optional ContainerMetadata metadata = 3;
ContainerMetadata metadata = 3;
// Spec of the image.
optional ImageSpec image = 4;
ImageSpec image = 4;
// Reference to the image in use. For most runtimes, this should be an
// image ID.
optional string image_ref = 5;
string image_ref = 5;
// State of the container.
optional ContainerState state = 6;
ContainerState state = 6;
// Creation time of the container in nanoseconds.
optional int64 created_at = 7;
int64 created_at = 7;
// Key-value pairs that may be used to scope and select individual resources.
map<string, string> labels = 8;
// Unstructured key-value map holding arbitrary metadata.

@@ -705,35 +734,35 @@ message ListContainersResponse {

message ContainerStatusRequest {
// ID of the container for which to retrieve status.
optional string container_id = 1;
string container_id = 1;
}

// ContainerStatus represents the status of a container.
message ContainerStatus {
// ID of the container.
optional string id = 1;
string id = 1;
// Metadata of the container.
optional ContainerMetadata metadata = 2;
ContainerMetadata metadata = 2;
// Status of the container.
optional ContainerState state = 3;
ContainerState state = 3;
// Creation time of the container in nanoseconds.
optional int64 created_at = 4;
// Start time of the container in nanoseconds.
optional int64 started_at = 5;
// Finish time of the container in nanoseconds.
optional int64 finished_at = 6;
// Exit code of the container.
optional int32 exit_code = 7;
int64 created_at = 4;
// Start time of the container in nanoseconds. Default: 0 (not specified).
int64 started_at = 5;
// Finish time of the container in nanoseconds. Default: 0 (not specified).
int64 finished_at = 6;
// Exit code of the container. Only required when finished_at != 0. Default: 0.
int32 exit_code = 7;
// Spec of the image.
optional ImageSpec image = 8;
ImageSpec image = 8;
// Reference to the image in use. For most runtimes, this should be an
// image ID
optional string image_ref = 9;
string image_ref = 9;
// Brief CamelCase string explaining why container is in its current state.
optional string reason = 10;
string reason = 10;
// Human-readable message indicating details about why container is in its
// current state.
optional string message = 11;
string message = 11;
// Key-value pairs that may be used to scope and select individual resources.
map<string,string> labels = 12;
// Unstructured key-value map holding arbitrary metadata.

@@ -747,97 +776,97 @@ message ContainerStatus {

message ContainerStatusResponse {
// Status of the container.
optional ContainerStatus status = 1;
ContainerStatus status = 1;
}

message ExecSyncRequest {
// ID of the container.
optional string container_id = 1;
string container_id = 1;
// Command to execute.
repeated string cmd = 2;
// Timeout in seconds to stop the command. Default: run forever.
optional int64 timeout = 3;
// Timeout in seconds to stop the command. Default: 0 (run forever).
int64 timeout = 3;
}

message ExecSyncResponse {
// Captured command stdout output.
optional bytes stdout = 1;
bytes stdout = 1;
// Captured command stderr output.
optional bytes stderr = 2;
// Exit code the command finished with.
optional int32 exit_code = 3;
bytes stderr = 2;
// Exit code the command finished with. Default: 0 (success).
int32 exit_code = 3;
}

message ExecRequest {
// ID of the container in which to execute the command.
optional string container_id = 1;
string container_id = 1;
// Command to execute.
repeated string cmd = 2;
// Whether to exec the command in a TTY.
optional bool tty = 3;
bool tty = 3;
// Whether to stream stdin.
optional bool stdin = 4;
bool stdin = 4;
}

message ExecResponse {
// Fully qualified URL of the exec streaming server.
optional string url = 1;
string url = 1;
}

message AttachRequest {
// ID of the container to which to attach.
optional string container_id = 1;
string container_id = 1;
// Whether to stream stdin.
optional bool stdin = 2;
bool stdin = 2;
// Whether the process being attached is running in a TTY.
// This must match the TTY setting in the ContainerConfig.
optional bool tty = 3;
bool tty = 3;
}

message AttachResponse {
// Fully qualified URL of the attach streaming server.
optional string url = 1;
string url = 1;
}

message PortForwardRequest {
// ID of the container to which to forward the port.
optional string pod_sandbox_id = 1;
string pod_sandbox_id = 1;
// Port to forward.
repeated int32 port = 2;
}

message PortForwardResponse {
// Fully qualified URL of the port-forward streaming server.
optional string url = 1;
string url = 1;
}

message ImageFilter {
// Spec of the image.
optional ImageSpec image = 1;
ImageSpec image = 1;
}

message ListImagesRequest {
// Filter to list images.
optional ImageFilter filter = 1;
ImageFilter filter = 1;
}

// Basic information about a container image.
message Image {
// ID of the image.
optional string id = 1;
string id = 1;
// Other names by which this image is known.
repeated string repo_tags = 2;
// Digests by which this image is known.
repeated string repo_digests = 3;
// Size of the image in bytes.
optional uint64 size = 4;
// Size of the image in bytes. Must be > 0.
uint64 size = 4;
// UID that will run the command(s). This is used as a default if no user is
// specified when creating the container. UID and the following user name
// are mutually exclusive.
optional int64 uid = 5;
Int64Value uid = 5;
// User name that will run the command(s). This is used if UID is not set
// and no user is specified when creating container.
optional string username = 6;
string username = 6;
}

message ListImagesResponse {

@@ -847,67 +876,67 @@ message ListImagesResponse {

message ImageStatusRequest {
// Spec of the image.
optional ImageSpec image = 1;
ImageSpec image = 1;
}

message ImageStatusResponse {
// Status of the image.
optional Image image = 1;
Image image = 1;
}

// AuthConfig contains authorization information for connecting to a registry.
message AuthConfig {
optional string username = 1;
optional string password = 2;
optional string auth = 3;
optional string server_address = 4;
string username = 1;
string password = 2;
string auth = 3;
string server_address = 4;
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
optional string identity_token = 5;
string identity_token = 5;
// RegistryToken is a bearer token to be sent to a registry
optional string registry_token = 6;
string registry_token = 6;
}

message PullImageRequest {
// Spec of the image.
optional ImageSpec image = 1;
ImageSpec image = 1;
// Authentication configuration for pulling the image.
optional AuthConfig auth = 2;
AuthConfig auth = 2;
// Config of the PodSandbox, which is used to pull image in PodSandbox context.
optional PodSandboxConfig sandbox_config = 3;
PodSandboxConfig sandbox_config = 3;
}

message PullImageResponse {
// Reference to the image in use. For most runtimes, this should be an
// image ID or digest.
optional string image_ref = 1;
string image_ref = 1;
}

message RemoveImageRequest {
// Spec of the image to remove.
optional ImageSpec image = 1;
ImageSpec image = 1;
}

message RemoveImageResponse {}

message NetworkConfig {
// CIDR to use for pod IP addresses.
optional string pod_cidr = 1;
string pod_cidr = 1;
}

message RuntimeConfig {
optional NetworkConfig network_config = 1;
NetworkConfig network_config = 1;
}

message UpdateRuntimeConfigRequest {
optional RuntimeConfig runtime_config = 1;
RuntimeConfig runtime_config = 1;
}

message UpdateRuntimeConfigResponse {}

// RuntimeCondition contains condition information for the runtime.
// There are 2 kinds of runtime conditions:
// 1. Required condtitions: Conditions are required for kubelet to work
// 1. Required conditions: Conditions are required for kubelet to work
// properly. If any required condition is unmet, the node will be not ready.
// The required conditions include:
// * RuntimeReady: RuntimeReady means the runtime is up and ready to accept

@@ -920,13 +949,13 @@ message UpdateRuntimeConfigResponse {}
// them understand the status of the system.
message RuntimeCondition {
// Type of runtime condition.
optional string type = 1;
// Status of the condition, one of true/false.
optional bool status = 2;
string type = 1;
// Status of the condition, one of true/false. Default: false.
bool status = 2;
// Brief CamelCase string containing reason for the condition's last transition.
optional string reason = 3;
string reason = 3;
// Human-readable message indicating details about last transition.
optional string message = 4;
string message = 4;
}

// RuntimeStatus is information about the current status of the runtime.

@@ -939,5 +968,5 @@ message StatusRequest {}

message StatusResponse {
// Status of the Runtime.
optional RuntimeStatus status = 1;
RuntimeStatus status = 1;
}
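A recurring theme in this proto change: fields whose zero value is meaningful (`run_as_user`, the image `uid`) move from `optional int64` to the new `Int64Value` message rather than a plain `int64`, because proto3 drops explicit presence for scalars, and only a nil versus non-nil message can signal "not set". A hedged Go sketch of the resulting usage, with stand-in types rather than the generated API:

```go
package main

import "fmt"

// Stand-in for the Int64Value wrapper introduced above: wrapping the scalar in
// a message restores the presence information that proto3 drops for plain
// int64 fields, so UID 0 (root) can still be told apart from "no UID set".
type int64Value struct {
	Value int64
}

// Stand-in for a security context carrying the wrapped run_as_user field.
type securityContextSketch struct {
	RunAsUser *int64Value
}

func describeRunAsUser(sc securityContextSketch) string {
	if sc.RunAsUser == nil {
		return "run_as_user not set; fall back to the image default"
	}
	return fmt.Sprintf("run as UID %d", sc.RunAsUser.Value)
}

func main() {
	fmt.Println(describeRunAsUser(securityContextSketch{}))                                 // unset
	fmt.Println(describeRunAsUser(securityContextSketch{RunAsUser: &int64Value{Value: 0}})) // explicit root
}
```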
vendor/k8s.io/kubernetes/pkg/kubelet/client/BUILD (generated, vendored): 6 changes

@@ -14,12 +14,12 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/transport:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/transport",
],
)

@@ -33,7 +33,7 @@ go_test(
],
deps = [
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//vendor:k8s.io/client-go/rest",
],
)
vendor/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client.go (generated, vendored): 4 changes

@@ -24,9 +24,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
vendor/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client_test.go (generated, vendored): 2 changes

@@ -19,8 +19,8 @@ package client
import (
"testing"

restclient "k8s.io/client-go/rest"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/client/restclient"
)

// Ensure a node client can be used as a NodeGetter.
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go (generated, vendored): 1 change

@@ -152,6 +152,7 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
req := require.New(t)
tempDir, err := ioutil.TempDir("", "")
req.NoError(err)
defer os.RemoveAll(tempDir)
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm))
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm))
mountInt := &fakeMountInterface{
vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD (generated, vendored): 5 changes

@@ -29,7 +29,6 @@ go_library(
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/types:go_default_library",

@@ -38,6 +37,8 @@ go_library(
"//pkg/util/hash:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:golang.org/x/exp/inotify",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets",

@@ -67,12 +68,12 @@ go_test(
"//pkg/client/record:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/testing:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/util/testing",
],
)
vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go (generated, vendored): 5 changes

@@ -18,18 +18,19 @@ limitations under the License.
package config

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", v1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
newSourceApiserverFromLW(lw, updates)
}
15
vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver_test.go
generated
vendored
@ -19,6 +19,7 @@ package config
import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
@ -32,11 +33,11 @@ type fakePodLW struct {
watchResp watch.Interface
}

func (lw fakePodLW) List(options v1.ListOptions) (runtime.Object, error) {
func (lw fakePodLW) List(options metav1.ListOptions) (runtime.Object, error) {
return lw.listResp, nil
}

func (lw fakePodLW) Watch(options v1.ListOptions) (watch.Interface, error) {
func (lw fakePodLW) Watch(options metav1.ListOptions) (watch.Interface, error) {
return lw.watchResp, nil
}
@ -44,13 +45,13 @@ var _ cache.ListerWatcher = fakePodLW{}

func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
pod1v1 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p"},
ObjectMeta: metav1.ObjectMeta{Name: "p"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}}
pod1v2 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p"},
ObjectMeta: metav1.ObjectMeta{Name: "p"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/two"}}}}
pod2 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "q"},
ObjectMeta: metav1.ObjectMeta{Name: "q"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}}

// Setup fake api client.
@ -130,10 +131,10 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {

func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) {
pod1 := v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "one"},
ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "one"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}}
pod2 := v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "two"},
ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "two"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}}

// Setup fake api client.

5
vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go
generated
vendored
@ -22,6 +22,7 @@ import (
"encoding/hex"
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
@ -57,7 +58,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node
glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source)

if pod.Namespace == "" {
pod.Namespace = kubetypes.NamespaceDefault
pod.Namespace = metav1.NamespaceDefault
}
glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)

@ -80,7 +81,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node
func getSelfLink(name, namespace string) string {
var selfLink string
if len(namespace) == 0 {
namespace = api.NamespaceDefault
namespace = metav1.NamespaceDefault
}
selfLink = fmt.Sprintf("/api/"+api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version+"/pods/namespaces/%s/%s", name, namespace)
return selfLink

29
vendor/k8s.io/kubernetes/pkg/kubelet/config/common_test.go
generated
vendored
|
@ -36,7 +36,7 @@ func TestDecodeSinglePod(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
UID: "12345",
|
||||
Namespace: "mynamespace",
|
||||
|
@ -46,13 +46,15 @@ func TestDecodeSinglePod(t *testing.T) {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
Containers: []v1.Container{{
|
||||
Name: "image",
|
||||
Image: "test/image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
Name: "image",
|
||||
Image: "test/image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
}
|
||||
json, err := runtime.Encode(testapi.Default.Codec(), pod)
|
||||
|
@ -96,7 +98,7 @@ func TestDecodePodList(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
UID: "12345",
|
||||
Namespace: "mynamespace",
|
||||
|
@ -106,13 +108,16 @@ func TestDecodePodList(t *testing.T) {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
Containers: []v1.Container{{
|
||||
Name: "image",
|
||||
Image: "test/image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
Name: "image",
|
||||
Image: "test/image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
}
|
||||
podList := &v1.PodList{
23
vendor/k8s.io/kubernetes/pkg/kubelet/config/config_test.go
generated
vendored
|
@ -59,7 +59,7 @@ func (s sortedPods) Less(i, j int) bool {
|
|||
|
||||
func CreateValidPod(name, namespace string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(name), // for the purpose of testing, this is unique enough
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
|
@ -69,10 +69,11 @@ func CreateValidPod(name, namespace string) *v1.Pod {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "ctr",
|
||||
Image: "image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
Name: "ctr",
|
||||
Image: "image",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -186,7 +187,7 @@ func TestInvalidPodFiltered(t *testing.T) {
|
|||
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
|
||||
|
||||
// add an invalid update
|
||||
podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo"}})
|
||||
podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
|
||||
channel <- podUpdate
|
||||
expectNoPodUpdate(t, ch)
|
||||
}
|
||||
|
@ -204,7 +205,7 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
|
|||
|
||||
// container updates are separated as UPDATE
|
||||
pod := *podUpdate.Pods[0]
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent, TerminationMessagePolicy: v1.TerminationMessageReadFile}}
|
||||
channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
|
||||
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, &pod))
|
||||
}
|
||||
|
@ -222,7 +223,7 @@ func TestNewPodAddedSnapshot(t *testing.T) {
|
|||
|
||||
// container updates are separated as UPDATE
|
||||
pod := *podUpdate.Pods[0]
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent, TerminationMessagePolicy: v1.TerminationMessageReadFile}}
|
||||
channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
|
||||
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, &pod))
|
||||
}
|
||||
|
@ -240,12 +241,12 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) {
|
|||
|
||||
// an kubetypes.ADD should be converted to kubetypes.UPDATE
|
||||
pod := CreateValidPod("foo", "new")
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent, TerminationMessagePolicy: v1.TerminationMessageReadFile}}
|
||||
podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, pod)
|
||||
channel <- podUpdate
|
||||
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
|
||||
|
||||
podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "new"}})
|
||||
podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "new"}})
|
||||
channel <- podUpdate
|
||||
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.REMOVE, TestSource, pod))
|
||||
}
|
||||
|
@ -282,7 +283,7 @@ func TestNewPodAddedUpdatedSet(t *testing.T) {
|
|||
|
||||
// should be converted to an kubetypes.ADD, kubetypes.REMOVE, and kubetypes.UPDATE
|
||||
pod := CreateValidPod("foo2", "new")
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
|
||||
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent, TerminationMessagePolicy: v1.TerminationMessageReadFile}}
|
||||
podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
|
||||
channel <- podUpdate
|
||||
expectPodUpdate(t, ch,
16
vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux_test.go
generated
vendored
|
@ -33,13 +33,13 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
||||
)
|
||||
|
||||
func TestExtractFromNonExistentFile(t *testing.T) {
|
||||
|
@ -180,7 +180,7 @@ func getTestCases(hostname types.NodeName) []*testCase {
|
|||
Kind: "Pod",
|
||||
APIVersion: "",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
UID: "12345",
|
||||
Namespace: "mynamespace",
|
||||
|
@ -188,13 +188,14 @@ func getTestCases(hostname types.NodeName) []*testCase {
|
|||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
},
|
||||
},
|
||||
expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-" + string(hostname),
|
||||
UID: "12345",
|
||||
Namespace: "mynamespace",
|
||||
|
@ -209,10 +210,13 @@ func getTestCases(hostname types.NodeName) []*testCase {
|
|||
Containers: []v1.Container{{
|
||||
Name: "image",
|
||||
Image: "test/image",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
47
vendor/k8s.io/kubernetes/pkg/kubelet/config/http_test.go
generated
vendored
|
@ -26,12 +26,12 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
||||
)
|
||||
|
||||
func TestURLErrorNotExistNoUpdate(t *testing.T) {
|
||||
|
@ -138,15 +138,16 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
Kind: "Pod",
|
||||
APIVersion: "",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
UID: "111",
|
||||
Namespace: "mynamespace",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: string(nodeName),
|
||||
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
|
||||
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways, TerminationMessagePolicy: v1.TerminationMessageReadFile}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
|
@ -155,7 +156,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
expected: CreatePodUpdate(kubetypes.SET,
|
||||
kubetypes.HTTPSource,
|
||||
&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "111",
|
||||
Name: "foo" + "-" + nodeName,
|
||||
Namespace: "mynamespace",
|
||||
|
@ -168,12 +169,14 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
|
||||
Containers: []v1.Container{{
|
||||
Name: "1",
|
||||
Image: "foo",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
|
@ -190,28 +193,30 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
},
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
UID: "111",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
|
||||
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways, TerminationMessagePolicy: v1.TerminationMessageReadFile}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bar",
|
||||
UID: "222",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
Containers: []v1.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}},
|
||||
Containers: []v1.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: "", TerminationMessagePolicy: v1.TerminationMessageReadFile}},
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
|
@ -222,12 +227,12 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
expected: CreatePodUpdate(kubetypes.SET,
|
||||
kubetypes.HTTPSource,
|
||||
&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "111",
|
||||
Name: "foo" + "-" + nodeName,
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
|
||||
SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault),
|
||||
SelfLink: getSelfLink("foo-"+nodeName, metav1.NamespaceDefault),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
|
@ -235,12 +240,14 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
|
||||
Containers: []v1.Container{{
|
||||
Name: "1",
|
||||
Image: "foo",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "Always",
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
|
@ -248,12 +255,12 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
},
|
||||
},
|
||||
&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "222",
|
||||
Name: "bar" + "-" + nodeName,
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"},
|
||||
SelfLink: getSelfLink("bar-"+nodeName, kubetypes.NamespaceDefault),
|
||||
SelfLink: getSelfLink("bar-"+nodeName, metav1.NamespaceDefault),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
|
@ -261,12 +268,14 @@ func TestExtractPodsFromHTTP(t *testing.T) {
|
|||
DNSPolicy: v1.DNSClusterFirst,
|
||||
TerminationGracePeriodSeconds: &grace,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
SchedulerName: api.DefaultSchedulerName,
|
||||
|
||||
Containers: []v1.Container{{
|
||||
Name: "2",
|
||||
Image: "bar:bartag",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
TerminationMessagePolicy: v1.TerminationMessageReadFile,
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
|
@ -322,7 +331,7 @@ func TestURLWithHeader(t *testing.T) {
|
|||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
UID: "111",
|
||||
Namespace: "mynamespace",
2
vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD
generated
vendored
@ -31,7 +31,6 @@ go_library(
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubelet/util/ioutils:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/hash:go_default_library",
"//pkg/util/term:go_default_library",
"//pkg/volume:go_default_library",
@ -43,6 +42,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

6
vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
generated
vendored
@ -197,14 +197,14 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
// Populate sandboxes in kubecontainer.Pod
for _, sandbox := range podStatus.SandboxStatuses {
runningPod.Sandboxes = append(runningPod.Sandboxes, &Container{
ID: ContainerID{Type: runtimeName, ID: *sandbox.Id},
State: SandboxToContainerState(*sandbox.State),
ID: ContainerID{Type: runtimeName, ID: sandbox.Id},
State: SandboxToContainerState(sandbox.State),
})
}
return runningPod
}

// sandboxToContainerState converts runtimeApi.PodSandboxState to
// SandboxToContainerState converts runtimeapi.PodSandboxState to
// kubecontainer.ContainerState.
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.

3
vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers_test.go
generated
vendored
@ -20,6 +20,7 @@ import (
"reflect"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
)

@ -137,7 +138,7 @@ func TestExpandCommandAndArgs(t *testing.T) {

func TestShouldContainerBeRestarted(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",

7
vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go
generated
vendored
@ -32,6 +32,7 @@ type OSInterface interface {
Remove(path string) error
RemoveAll(path string) error
Create(path string) (*os.File, error)
Chmod(path string, perm os.FileMode) error
Hostname() (name string, err error)
Chtimes(path string, atime time.Time, mtime time.Time) error
Pipe() (r *os.File, w *os.File, err error)
@ -73,6 +74,12 @@ func (RealOS) Create(path string) (*os.File, error) {
return os.Create(path)
}

// Chmod will change the permissions on the specified path or return
// an error.
func (RealOS) Chmod(path string, perm os.FileMode) error {
return os.Chmod(path, perm)
}

// Hostname will call os.Hostname to return the hostname.
func (RealOS) Hostname() (name string, err error) {
return os.Hostname()

2
vendor/k8s.io/kubernetes/pkg/kubelet/container/ref_test.go
generated
vendored
@ -69,7 +69,7 @@ func TestGenerateContainerRef(t *testing.T) {
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "ok",
Namespace: "test-ns",
UID: "bar",

18
vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go
generated
vendored
@ -26,9 +26,9 @@ import (

"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/term"
"k8s.io/kubernetes/pkg/volume"
)
@ -626,3 +626,19 @@ func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j],
func (s SortContainerStatusesByCreationTime) Less(i, j int) bool {
return s[i].CreatedAt.Before(s[j].CreatedAt)
}

const (
// MaxPodTerminationMessageLogLength is the maximum bytes any one pod may have written
// as termination message output across all containers. Containers will be evenly truncated
// until output is below this limit.
MaxPodTerminationMessageLogLength = 1024 * 12
// MaxContainerTerminationMessageLength is the upper bound any one container may write to
// its termination message path. Contents above this length will be truncated.
MaxContainerTerminationMessageLength = 1024 * 4
// MaxContainerTerminationMessageLogLength is the maximum bytes any one container will
// have written to its termination message when the message is read from the logs.
MaxContainerTerminationMessageLogLength = 1024 * 2
// MaxContainerTerminationMessageLogLines is the maximum number of previous lines of
// log output that the termination message can contain.
MaxContainerTerminationMessageLogLines = 80
)

2
vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/BUILD
generated
vendored
@ -20,12 +20,12 @@ go_library(
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/term:go_default_library",
"//pkg/volume:go_default_library",
"//vendor:github.com/golang/mock/gomock",
"//vendor:github.com/stretchr/testify/mock",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

2
vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go
generated
vendored
@ -25,9 +25,9 @@ import (
"time"

"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
. "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/term"
"k8s.io/kubernetes/pkg/volume"
)

5
vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/os.go
generated
vendored
@ -83,6 +83,11 @@ func (FakeOS) Create(path string) (*os.File, error) {
return nil, nil
}

// Chmod is a fake call that returns nil.
func (FakeOS) Chmod(path string, perm os.FileMode) error {
return nil
}

// Hostname is a fake call that returns nil.
func (f *FakeOS) Hostname() (name string, err error) {
return f.HostName, nil

2
vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go
generated
vendored
@ -22,9 +22,9 @@ import (

"github.com/stretchr/testify/mock"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
. "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/term"
"k8s.io/kubernetes/pkg/volume"
)

3
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD
generated
vendored
@ -49,7 +49,6 @@ go_library(
"//vendor:github.com/docker/engine-api/types/versions",
"//vendor:github.com/docker/go-connections/nat",
"//vendor:github.com/golang/glog",
"//vendor:github.com/golang/protobuf/proto",
],
)

@ -78,12 +77,12 @@ go_test(
"//pkg/kubelet/types:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/clock:go_default_library",
"//vendor:github.com/docker/engine-api/types",
"//vendor:github.com/docker/engine-api/types/container",
"//vendor:github.com/golang/mock/gomock",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:github.com/stretchr/testify/require",
"//vendor:k8s.io/client-go/util/clock",
],
)

32
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert.go
generated
vendored
|
@ -43,10 +43,10 @@ func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeapi.Image, error)
|
|||
|
||||
size := uint64(image.VirtualSize)
|
||||
return &runtimeapi.Image{
|
||||
Id: &image.ID,
|
||||
Id: image.ID,
|
||||
RepoTags: image.RepoTags,
|
||||
RepoDigests: image.RepoDigests,
|
||||
Size_: &size,
|
||||
Size_: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -57,13 +57,17 @@ func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeapi
|
|||
|
||||
size := uint64(image.VirtualSize)
|
||||
runtimeImage := &runtimeapi.Image{
|
||||
Id: &image.ID,
|
||||
Id: image.ID,
|
||||
RepoTags: image.RepoTags,
|
||||
RepoDigests: image.RepoDigests,
|
||||
Size_: &size,
|
||||
Size_: size,
|
||||
}
|
||||
|
||||
runtimeImage.Uid, runtimeImage.Username = getUserFromImageUser(image.Config.User)
|
||||
uid, username := getUserFromImageUser(image.Config.User)
|
||||
if uid != nil {
|
||||
runtimeImage.Uid = &runtimeapi.Int64Value{Value: *uid}
|
||||
}
|
||||
runtimeImage.Username = username
|
||||
return runtimeImage, nil
|
||||
}
|
||||
|
||||
|
@ -91,13 +95,13 @@ func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeapi.Container, err
|
|||
// The timestamp in dockertypes.Container is in seconds.
|
||||
createdAt := c.Created * int64(time.Second)
|
||||
return &runtimeapi.Container{
|
||||
Id: &c.ID,
|
||||
PodSandboxId: &sandboxID,
|
||||
Id: c.ID,
|
||||
PodSandboxId: sandboxID,
|
||||
Metadata: metadata,
|
||||
Image: &runtimeapi.ImageSpec{Image: &c.Image},
|
||||
ImageRef: &c.ImageID,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Image: &runtimeapi.ImageSpec{Image: c.Image},
|
||||
ImageRef: c.ImageID,
|
||||
State: state,
|
||||
CreatedAt: createdAt,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
|
||||
|
@ -157,10 +161,10 @@ func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeapi.PodSandbox, erro
|
|||
// The timestamp in dockertypes.Container is in seconds.
|
||||
createdAt := c.Created * int64(time.Second)
|
||||
return &runtimeapi.PodSandbox{
|
||||
Id: &c.ID,
|
||||
Id: c.ID,
|
||||
Metadata: metadata,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
State: state,
|
||||
CreatedAt: createdAt,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
78
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go
generated
vendored
|
@ -42,14 +42,14 @@ func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*
|
|||
f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer)
|
||||
|
||||
if filter != nil {
|
||||
if filter.Id != nil {
|
||||
f.Add("id", filter.GetId())
|
||||
if filter.Id != "" {
|
||||
f.Add("id", filter.Id)
|
||||
}
|
||||
if filter.State != nil {
|
||||
f.Add("status", toDockerContainerStatus(filter.GetState()))
|
||||
f.Add("status", toDockerContainerStatus(filter.GetState().State))
|
||||
}
|
||||
if filter.PodSandboxId != nil {
|
||||
f.AddLabel(sandboxIDLabelKey, *filter.PodSandboxId)
|
||||
if filter.PodSandboxId != "" {
|
||||
f.AddLabel(sandboxIDLabelKey, filter.PodSandboxId)
|
||||
}
|
||||
|
||||
if filter.LabelSelector != nil {
|
||||
|
@ -87,35 +87,35 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
|||
return "", fmt.Errorf("container config is nil")
|
||||
}
|
||||
if sandboxConfig == nil {
|
||||
return "", fmt.Errorf("sandbox config is nil for container %q", config.Metadata.GetName())
|
||||
return "", fmt.Errorf("sandbox config is nil for container %q", config.Metadata.Name)
|
||||
}
|
||||
|
||||
labels := makeLabels(config.GetLabels(), config.GetAnnotations())
|
||||
// Apply a the container type label.
|
||||
labels[containerTypeLabelKey] = containerTypeLabelContainer
|
||||
// Write the container log path in the labels.
|
||||
labels[containerLogPathLabelKey] = filepath.Join(sandboxConfig.GetLogDirectory(), config.GetLogPath())
|
||||
labels[containerLogPathLabelKey] = filepath.Join(sandboxConfig.LogDirectory, config.LogPath)
|
||||
// Write the sandbox ID in the labels.
|
||||
labels[sandboxIDLabelKey] = podSandboxID
|
||||
|
||||
image := ""
|
||||
if iSpec := config.GetImage(); iSpec != nil {
|
||||
image = iSpec.GetImage()
|
||||
image = iSpec.Image
|
||||
}
|
||||
createConfig := dockertypes.ContainerCreateConfig{
|
||||
Name: makeContainerName(sandboxConfig, config),
|
||||
Config: &dockercontainer.Config{
|
||||
// TODO: set User.
|
||||
Entrypoint: dockerstrslice.StrSlice(config.GetCommand()),
|
||||
Cmd: dockerstrslice.StrSlice(config.GetArgs()),
|
||||
Entrypoint: dockerstrslice.StrSlice(config.Command),
|
||||
Cmd: dockerstrslice.StrSlice(config.Args),
|
||||
Env: generateEnvList(config.GetEnvs()),
|
||||
Image: image,
|
||||
WorkingDir: config.GetWorkingDir(),
|
||||
WorkingDir: config.WorkingDir,
|
||||
Labels: labels,
|
||||
// Interactive containers:
|
||||
OpenStdin: config.GetStdin(),
|
||||
StdinOnce: config.GetStdinOnce(),
|
||||
Tty: config.GetTty(),
|
||||
OpenStdin: config.Stdin,
|
||||
StdinOnce: config.StdinOnce,
|
||||
Tty: config.Tty,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -132,13 +132,13 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
|||
rOpts := lc.GetResources()
|
||||
if rOpts != nil {
|
||||
hc.Resources = dockercontainer.Resources{
|
||||
Memory: rOpts.GetMemoryLimitInBytes(),
|
||||
Memory: rOpts.MemoryLimitInBytes,
|
||||
MemorySwap: dockertools.DefaultMemorySwap(),
|
||||
CPUShares: rOpts.GetCpuShares(),
|
||||
CPUQuota: rOpts.GetCpuQuota(),
|
||||
CPUPeriod: rOpts.GetCpuPeriod(),
|
||||
CPUShares: rOpts.CpuShares,
|
||||
CPUQuota: rOpts.CpuQuota,
|
||||
CPUPeriod: rOpts.CpuPeriod,
|
||||
}
|
||||
hc.OomScoreAdj = int(rOpts.GetOomScoreAdj())
|
||||
hc.OomScoreAdj = int(rOpts.OomScoreAdj)
|
||||
}
|
||||
// Note: ShmSize is handled in kube_docker_client.go
|
||||
|
||||
|
@ -149,9 +149,9 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
|||
// Apply cgroupsParent derived from the sandbox config.
|
||||
if lc := sandboxConfig.GetLinux(); lc != nil {
|
||||
// Apply Cgroup options.
|
||||
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.GetCgroupParent())
|
||||
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate cgroup parent in expected syntax for container %q: %v", config.Metadata.GetName(), err)
|
||||
return "", fmt.Errorf("failed to generate cgroup parent in expected syntax for container %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
hc.CgroupParent = cgroupParent
|
||||
}
|
||||
|
@ -160,17 +160,17 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
|||
devices := make([]dockercontainer.DeviceMapping, len(config.Devices))
|
||||
for i, device := range config.Devices {
|
||||
devices[i] = dockercontainer.DeviceMapping{
|
||||
PathOnHost: device.GetHostPath(),
|
||||
PathInContainer: device.GetContainerPath(),
|
||||
CgroupPermissions: device.GetPermissions(),
|
||||
PathOnHost: device.HostPath,
|
||||
PathInContainer: device.ContainerPath,
|
||||
CgroupPermissions: device.Permissions,
|
||||
}
|
||||
}
|
||||
hc.Resources.Devices = devices
|
||||
|
||||
// Apply appArmor and seccomp options.
|
||||
securityOpts, err := getContainerSecurityOpts(config.Metadata.GetName(), sandboxConfig, ds.seccompProfileRoot)
|
||||
securityOpts, err := getContainerSecurityOpts(config.Metadata.Name, sandboxConfig, ds.seccompProfileRoot)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate container security options for container %q: %v", config.Metadata.GetName(), err)
|
||||
return "", fmt.Errorf("failed to generate container security options for container %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
|
||||
|
||||
|
@ -310,9 +310,9 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai
|
|||
m := r.Mounts[i]
|
||||
readonly := !m.RW
|
||||
mounts = append(mounts, &runtimeapi.Mount{
|
||||
HostPath: &m.Source,
|
||||
ContainerPath: &m.Destination,
|
||||
Readonly: &readonly,
|
||||
HostPath: m.Source,
|
||||
ContainerPath: m.Destination,
|
||||
Readonly: readonly,
|
||||
// Note: Can't set SeLinuxRelabel
|
||||
})
|
||||
}
|
||||
|
@ -369,18 +369,18 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai
|
|||
imageName = ir.RepoTags[0]
|
||||
}
|
||||
return &runtimeapi.ContainerStatus{
|
||||
Id: &r.ID,
|
||||
Id: r.ID,
|
||||
Metadata: metadata,
|
||||
Image: &runtimeapi.ImageSpec{Image: &imageName},
|
||||
ImageRef: &imageID,
|
||||
Image: &runtimeapi.ImageSpec{Image: imageName},
|
||||
ImageRef: imageID,
|
||||
Mounts: mounts,
|
||||
ExitCode: &exitCode,
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
StartedAt: &st,
|
||||
FinishedAt: &ft,
|
||||
Reason: &reason,
|
||||
Message: &message,
|
||||
ExitCode: exitCode,
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
StartedAt: st,
|
||||
FinishedAt: ft,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}, nil
50
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container_test.go
generated
vendored
|
@ -32,10 +32,10 @@ import (
|
|||
func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeapi.ContainerConfig {
|
||||
return &runtimeapi.ContainerConfig{
|
||||
Metadata: &runtimeapi.ContainerMetadata{
|
||||
Name: &name,
|
||||
Attempt: &attempt,
|
||||
Name: name,
|
||||
Attempt: attempt,
|
||||
},
|
||||
Image: &runtimeapi.ImageSpec{Image: &image},
|
||||
Image: &runtimeapi.ImageSpec{Image: image},
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}
|
||||
|
@ -77,12 +77,12 @@ func TestListContainers(t *testing.T) {
|
|||
// the most recent containers first.
|
||||
expected = append([]*runtimeapi.Container{{
|
||||
Metadata: configs[i].Metadata,
|
||||
Id: &id,
|
||||
PodSandboxId: &sandboxID,
|
||||
State: &state,
|
||||
CreatedAt: &createdAt,
|
||||
Id: id,
|
||||
PodSandboxId: sandboxID,
|
||||
State: state,
|
||||
CreatedAt: createdAt,
|
||||
Image: configs[i].Image,
|
||||
ImageRef: &imageRef,
|
||||
ImageRef: imageRef,
|
||||
Labels: configs[i].Labels,
|
||||
Annotations: configs[i].Annotations,
|
||||
}}, expected...)
|
||||
|
@ -112,16 +112,16 @@ func TestContainerStatus(t *testing.T) {
|
|||
var reason, message string
|
||||
|
||||
expected := &runtimeapi.ContainerStatus{
|
||||
State: &state,
|
||||
CreatedAt: &ct,
|
||||
StartedAt: &st,
|
||||
FinishedAt: &ft,
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
StartedAt: st,
|
||||
FinishedAt: ft,
|
||||
Metadata: config.Metadata,
|
||||
Image: config.Image,
|
||||
ImageRef: &imageRef,
|
||||
ExitCode: &exitCode,
|
||||
Reason: &reason,
|
||||
Message: &message,
|
||||
ImageRef: imageRef,
|
||||
ExitCode: exitCode,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
Mounts: []*runtimeapi.Mount{},
|
||||
Labels: config.Labels,
|
||||
Annotations: config.Annotations,
|
||||
|
@ -129,7 +129,7 @@ func TestContainerStatus(t *testing.T) {
|
|||
|
||||
// Create the container.
|
||||
fClock.SetTime(time.Now().Add(-1 * time.Hour))
|
||||
*expected.CreatedAt = fClock.Now().UnixNano()
|
||||
expected.CreatedAt = fClock.Now().UnixNano()
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
|
||||
|
@ -140,7 +140,7 @@ func TestContainerStatus(t *testing.T) {
|
|||
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxId)
|
||||
|
||||
// Set the id manually since we don't know the id until it's created.
|
||||
expected.Id = &id
|
||||
expected.Id = id
|
||||
assert.NoError(t, err)
|
||||
status, err := ds.ContainerStatus(id)
|
||||
assert.NoError(t, err)
|
||||
|
@ -148,8 +148,8 @@ func TestContainerStatus(t *testing.T) {
|
|||
|
||||
// Advance the clock and start the container.
|
||||
fClock.SetTime(time.Now())
|
||||
*expected.StartedAt = fClock.Now().UnixNano()
|
||||
*expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
expected.StartedAt = fClock.Now().UnixNano()
|
||||
expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
|
@ -158,9 +158,9 @@ func TestContainerStatus(t *testing.T) {
|
|||
|
||||
// Advance the clock and stop the container.
|
||||
fClock.SetTime(time.Now().Add(1 * time.Hour))
|
||||
*expected.FinishedAt = fClock.Now().UnixNano()
|
||||
*expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
*expected.Reason = "Completed"
|
||||
expected.FinishedAt = fClock.Now().UnixNano()
|
||||
expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
expected.Reason = "Completed"
|
||||
|
||||
err = ds.StopContainer(id, 0)
|
||||
assert.NoError(t, err)
|
||||
|
@ -181,9 +181,9 @@ func TestContainerLogPath(t *testing.T) {
|
|||
containerLogPath := "0"
|
||||
kubeletContainerLogPath := filepath.Join(podLogPath, containerLogPath)
|
||||
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
|
||||
sConfig.LogDirectory = &podLogPath
|
||||
sConfig.LogDirectory = podLogPath
|
||||
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil)
|
||||
config.LogPath = &containerLogPath
|
||||
config.LogPath = containerLogPath
|
||||
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
28
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go
generated
vendored
|
@ -29,7 +29,7 @@ func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimea
|
|||
opts := dockertypes.ImageListOptions{}
|
||||
if filter != nil {
|
||||
if imgSpec := filter.GetImage(); imgSpec != nil {
|
||||
opts.MatchName = imgSpec.GetImage()
|
||||
opts.MatchName = imgSpec.Image
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -52,7 +52,7 @@ func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimea
|
|||
|
||||
// ImageStatus returns the status of the image, returns nil if the image doesn't present.
|
||||
func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
|
||||
imageInspect, err := ds.client.InspectImageByRef(image.GetImage())
|
||||
imageInspect, err := ds.client.InspectImageByRef(image.Image)
|
||||
if err != nil {
|
||||
if dockertools.IsImageNotFoundError(err) {
|
||||
return nil, nil
|
||||
|
@ -64,21 +64,23 @@ func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.I
|
|||
|
||||
// PullImage pulls an image with authentication config.
|
||||
func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) (string, error) {
|
||||
err := ds.client.PullImage(image.GetImage(),
|
||||
dockertypes.AuthConfig{
|
||||
Username: auth.GetUsername(),
|
||||
Password: auth.GetPassword(),
|
||||
ServerAddress: auth.GetServerAddress(),
|
||||
IdentityToken: auth.GetIdentityToken(),
|
||||
RegistryToken: auth.GetRegistryToken(),
|
||||
},
|
||||
authConfig := dockertypes.AuthConfig{}
|
||||
if auth != nil {
|
||||
authConfig.Username = auth.Username
|
||||
authConfig.Password = auth.Password
|
||||
authConfig.ServerAddress = auth.ServerAddress
|
||||
authConfig.IdentityToken = auth.IdentityToken
|
||||
authConfig.RegistryToken = auth.RegistryToken
|
||||
}
|
||||
err := ds.client.PullImage(image.Image,
|
||||
authConfig,
|
||||
dockertypes.ImagePullOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return dockertools.GetImageRef(ds.client, image.GetImage())
|
||||
return dockertools.GetImageRef(ds.client, image.Image)
|
||||
}
|
||||
|
||||
// RemoveImage removes the image.
|
||||
|
@ -86,7 +88,7 @@ func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
|
|||
// If the image has multiple tags, we need to remove all the tags
|
||||
// TODO: We assume image.Image is image ID here, which is true in the current implementation
|
||||
// of kubelet, but we should still clarify this in CRI.
|
||||
imageInspect, err := ds.client.InspectImageByID(image.GetImage())
|
||||
imageInspect, err := ds.client.InspectImageByID(image.Image)
|
||||
if err == nil && imageInspect != nil && len(imageInspect.RepoTags) > 1 {
|
||||
for _, tag := range imageInspect.RepoTags {
|
||||
if _, err := ds.client.RemoveImage(tag, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil {
|
||||
|
@ -96,6 +98,6 @@ func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
_, err = ds.client.RemoveImage(image.GetImage(), dockertypes.ImageRemoveOptions{PruneChildren: true})
|
||||
_, err = ds.client.RemoveImage(image.Image, dockertypes.ImageRemoveOptions{PruneChildren: true})
|
||||
return err
|
||||
}
4
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_test.go
generated
vendored
@ -29,7 +29,7 @@ func TestRemoveImage(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo"}}
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
ds.RemoveImage(&runtimeapi.ImageSpec{Image: id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
}
@ -38,7 +38,7 @@ func TestRemoveImageWithMultipleTags(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo", "bar"}}
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
ds.RemoveImage(&runtimeapi.ImageSpec{Image: id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
dockertools.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))

54
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go
generated
vendored
|
@ -66,13 +66,13 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (str
|
|||
// Step 2: Create the sandbox container.
|
||||
createConfig, err := ds.makeSandboxDockerConfig(config, image)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.GetName(), err)
|
||||
return "", fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
createResp, err := ds.client.CreateContainer(*createConfig)
|
||||
recoverFromConflictIfNeeded(ds.client, err)
|
||||
|
||||
if err != nil || createResp == nil {
|
||||
return "", fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.GetName(), err)
|
||||
return "", fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
|
||||
// Step 3: Start the sandbox container.
|
||||
|
@ -80,9 +80,9 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (str
|
|||
// startContainer failed.
|
||||
err = ds.client.StartContainer(createResp.ID)
|
||||
if err != nil {
|
||||
return createResp.ID, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.GetName(), err)
|
||||
return createResp.ID, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
|
||||
if nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions(); nsOptions != nil && nsOptions.HostNetwork {
|
||||
return createResp.ID, nil
|
||||
}
|
||||
|
||||
|
@ -94,7 +94,7 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (str
|
|||
// on the host as well, to satisfy parts of the pod spec that aren't
|
||||
// recognized by the CNI standard yet.
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
|
||||
err = ds.networkPlugin.SetUpPod(config.GetMetadata().GetNamespace(), config.GetMetadata().GetName(), cID)
|
||||
err = ds.networkPlugin.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID)
|
||||
// TODO: Do we need to teardown on failure or can we rely on a StopPodSandbox call with the given ID?
|
||||
return createResp.ID, err
|
||||
}
|
||||
|
@ -109,16 +109,16 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("Failed to get sandbox status: %v", err)
|
||||
}
|
||||
if !status.GetLinux().GetNamespaces().GetOptions().GetHostNetwork() {
|
||||
if nsOpts := status.GetLinux().GetNamespaces().GetOptions(); nsOpts != nil && !nsOpts.HostNetwork {
|
||||
m := status.GetMetadata()
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
|
||||
if err := ds.networkPlugin.TearDownPod(m.GetNamespace(), m.GetName(), cID); err != nil {
|
||||
if err := ds.networkPlugin.TearDownPod(m.Namespace, m.Name, cID); err != nil {
|
||||
// TODO: Figure out a way to retry this error. We can't
|
||||
// right now because the plugin throws errors when it doesn't find
|
||||
// eth0, which might not exist for various reasons (setup failed,
|
||||
// conf changed etc). In theory, it should teardown everything else
|
||||
// so there's no need to retry.
|
||||
glog.Errorf("Failed to teardown sandbox %v for pod %v/%v: %v", m.GetNamespace(), m.GetName(), podSandboxID, err)
|
||||
glog.Errorf("Failed to teardown sandbox %v for pod %v/%v: %v", m.Namespace, m.Name, podSandboxID, err)
|
||||
}
|
||||
}
|
||||
return ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod)
|
||||
|
@ -138,12 +138,12 @@ func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (st
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", *metadata.Namespace, *metadata.Name)
|
||||
msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", metadata.Namespace, metadata.Name)
|
||||
if sharesHostNetwork(sandbox) {
|
||||
return "", fmt.Errorf("%v: not responsible for host-network sandboxes", msg)
|
||||
}
|
||||
cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
|
||||
networkStatus, err := ds.networkPlugin.GetPodNetworkStatus(*metadata.Namespace, *metadata.Name, cID)
|
||||
networkStatus, err := ds.networkPlugin.GetPodNetworkStatus(metadata.Namespace, metadata.Name, cID)
|
||||
if err != nil {
|
||||
// This might be a sandbox that somehow ended up without a default
|
||||
// interface (eth0). We can't distinguish this from a more serious
|
||||
|
@ -203,7 +203,7 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS
if err != nil {
return nil, err
}
network := &runtimeapi.PodSandboxNetworkStatus{Ip: &IP}
network := &runtimeapi.PodSandboxNetworkStatus{Ip: IP}
netNS := getNetworkNamespace(r)

metadata, err := parseSandboxName(r.Name)

@ -213,18 +213,18 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS
hostNetwork := sharesHostNetwork(r)
labels, annotations := extractLabels(r.Config.Labels)
return &runtimeapi.PodSandboxStatus{
Id: &r.ID,
State: &state,
CreatedAt: &ct,
Id: r.ID,
State: state,
CreatedAt: ct,
Metadata: metadata,
Labels: labels,
Annotations: annotations,
Network: network,
Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeapi.Namespace{
Network: &netNS,
Network: netNS,
Options: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork,
HostNetwork: hostNetwork,
},
},
},

@ -243,11 +243,11 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox)

if filter != nil {
if filter.Id != nil {
f.Add("id", filter.GetId())
if filter.Id != "" {
f.Add("id", filter.Id)
}
if filter.State != nil {
if filter.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
if filter.GetState().State == runtimeapi.PodSandboxState_SANDBOX_READY {
// Only list running containers.
opts.All = false
} else {

@ -280,7 +280,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err)
continue
}
if filterOutReadySandboxes && converted.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}

@ -292,7 +292,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
// Apply Cgroup options.
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.GetCgroupParent())
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent)
if err != nil {
return err
}

@ -317,7 +317,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
createConfig := &dockertypes.ContainerCreateConfig{
Name: makeSandboxName(c),
Config: &dockercontainer.Config{
Hostname: c.GetHostname(),
Hostname: c.Hostname,
// TODO: Handle environment variables.
Image: image,
Labels: labels,

@ -328,7 +328,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
// Set sysctls if requested
sysctls, err := getSysctlsFromAnnotations(c.Annotations)
if err != nil {
return nil, fmt.Errorf("failed to get sysctls from annotations %v for sandbox %q: %v", c.Annotations, c.Metadata.GetName(), err)
return nil, fmt.Errorf("failed to get sysctls from annotations %v for sandbox %q: %v", c.Annotations, c.Metadata.Name, err)
}
hc.Sysctls = sysctls

@ -346,9 +346,9 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,

// Set DNS options.
if dnsConfig := c.GetDnsConfig(); dnsConfig != nil {
hc.DNS = dnsConfig.GetServers()
hc.DNSSearch = dnsConfig.GetSearches()
hc.DNSOptions = dnsConfig.GetOptions()
hc.DNS = dnsConfig.Servers
hc.DNSSearch = dnsConfig.Searches
hc.DNSOptions = dnsConfig.Options
}

// Apply resource options.

@ -357,7 +357,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
// Set security options.
securityOpts, err := getSandboxSecurityOpts(c, ds.seccompProfileRoot)
if err != nil {
return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.GetName(), err)
return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.Name, err)
}
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
return createConfig, nil
30
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_test.go
generated
vendored
@ -37,10 +37,10 @@ func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeapi.
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeapi.PodSandboxConfig {
return &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: &name,
Namespace: &namespace,
Uid: &uid,
Attempt: &attempt,
Name: name,
Namespace: namespace,
Uid: uid,
Attempt: attempt,
},
Labels: labels,
Annotations: annotations,

@ -72,9 +72,9 @@ func TestListSandboxes(t *testing.T) {
// the most recent sandbox first.
expected = append([]*runtimeapi.PodSandbox{{
Metadata: configs[i].Metadata,
Id: &id,
State: &state,
CreatedAt: &createdAt,
Id: id,
State: state,
CreatedAt: createdAt,
Labels: configs[i].Labels,
Annotations: configs[i].Annotations,
}}, expected...)

@ -102,18 +102,18 @@ func TestSandboxStatus(t *testing.T) {
ct := int64(0)
hostNetwork := false
expected := &runtimeapi.PodSandboxStatus{
State: &state,
CreatedAt: &ct,
State: state,
CreatedAt: ct,
Metadata: config.Metadata,
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: &fakeIP},
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Network: &fakeNS, Options: &runtimeapi.NamespaceOption{HostNetwork: &hostNetwork}}},
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: fakeIP},
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Network: fakeNS, Options: &runtimeapi.NamespaceOption{HostNetwork: hostNetwork}}},
Labels: labels,
Annotations: annotations,
}

// Create the sandbox.
fClock.SetTime(time.Now())
*expected.CreatedAt = fClock.Now().UnixNano()
expected.CreatedAt = fClock.Now().UnixNano()
id, err := ds.RunPodSandbox(config)

// Check internal labels

@ -122,13 +122,13 @@ func TestSandboxStatus(t *testing.T) {
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelSandbox)
assert.Equal(t, c.Config.Labels[types.KubernetesContainerNameLabel], sandboxContainerName)

expected.Id = &id // ID is only known after the creation.
expected.Id = id // ID is only known after the creation.
status, err := ds.PodSandboxStatus(id)
assert.NoError(t, err)
assert.Equal(t, expected, status)

// Stop the sandbox.
*expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
err = ds.StopPodSandbox(id)
assert.NoError(t, err)
status, err = ds.PodSandboxStatus(id)

@ -189,7 +189,7 @@ func TestHostNetworkPluginInvocation(t *testing.T) {
c.Linux = &runtimeapi.LinuxPodSandboxConfig{
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork,
HostNetwork: hostNetwork,
},
},
}
41
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go
generated
vendored
@ -21,7 +21,6 @@ import (
"net/http"

"github.com/golang/glog"
"github.com/golang/protobuf/proto"

"k8s.io/kubernetes/pkg/apis/componentconfig"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"

@ -102,7 +101,7 @@ var internalLabelKeys []string = []string{containerTypeLabelKey, containerLogPat

// NOTE: Anything passed to DockerService should be eventually handled in another way when we switch to running the shim as a different process.
func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot string, podSandboxImage string, streamingConfig *streaming.Config,
pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string) (DockerService, error) {
pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, execHandler dockertools.ExecHandler) (DockerService, error) {
c := dockertools.NewInstrumentedDockerInterface(client)
ds := &dockerService{
seccompProfileRoot: seccompProfileRoot,

@ -110,10 +109,8 @@ func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot str
os: kubecontainer.RealOS{},
podSandboxImage: podSandboxImage,
streamingRuntime: &streamingRuntime{
client: client,
// Only the native exec handling is supported for now.
// TODO(#35747) - Either deprecate nsenter exec handling, or add support for it here.
execHandler: &dockertools.NativeExecHandler{},
client: client,
execHandler: execHandler,
},
containerManager: cm.NewContainerManager(cgroupsName, client),
}

@ -191,10 +188,10 @@ func (ds *dockerService) Version(_ string) (*runtimeapi.VersionResponse, error)
// suffix to remedy this.
apiVersion := fmt.Sprintf("%s.0", v.APIVersion)
return &runtimeapi.VersionResponse{
Version: &runtimeAPIVersion,
RuntimeName: &name,
RuntimeVersion: &v.Version,
RuntimeApiVersion: &apiVersion,
Version: runtimeAPIVersion,
RuntimeName: name,
RuntimeVersion: v.Version,
RuntimeApiVersion: apiVersion,
}, nil
}

@ -204,9 +201,9 @@ func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeCo
return
}
glog.Infof("docker cri received runtime config %+v", runtimeConfig)
if ds.networkPlugin != nil && runtimeConfig.NetworkConfig.PodCidr != nil {
if ds.networkPlugin != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
event := make(map[string]interface{})
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = *runtimeConfig.NetworkConfig.PodCidr
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr
ds.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
}
return

@ -246,23 +243,23 @@ func (ds *dockerService) Start() error {
// TODO(random-liu): Set network condition accordingly here.
func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
runtimeReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeapi.RuntimeReady),
Status: proto.Bool(true),
Type: runtimeapi.RuntimeReady,
Status: true,
}
networkReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeapi.NetworkReady),
Status: proto.Bool(true),
Type: runtimeapi.NetworkReady,
Status: true,
}
conditions := []*runtimeapi.RuntimeCondition{runtimeReady, networkReady}
if _, err := ds.client.Version(); err != nil {
runtimeReady.Status = proto.Bool(false)
runtimeReady.Reason = proto.String("DockerDaemonNotReady")
runtimeReady.Message = proto.String(fmt.Sprintf("docker: failed to get docker version: %v", err))
runtimeReady.Status = false
runtimeReady.Reason = "DockerDaemonNotReady"
runtimeReady.Message = fmt.Sprintf("docker: failed to get docker version: %v", err)
}
if err := ds.networkPlugin.Status(); err != nil {
networkReady.Status = proto.Bool(false)
networkReady.Reason = proto.String("NetworkPluginNotReady")
networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err))
networkReady.Status = false
networkReady.Reason = "NetworkPluginNotReady"
networkReady.Message = fmt.Sprintf("docker: network plugin is not ready: %v", err)
}
return &runtimeapi.RuntimeStatus{Conditions: conditions}, nil
}
8
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service_test.go
generated
vendored
@ -24,12 +24,12 @@ import (
|
|||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/client-go/util/clock"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/mock_network"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
)
|
||||
|
||||
// newTestNetworkPlugin returns a mock plugin that implements network.NetworkPlugin
|
||||
|
@ -40,7 +40,7 @@ func newTestNetworkPlugin(t *testing.T) *mock_network.MockNetworkPlugin {
|
|||
|
||||
func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clock.FakeClock) {
|
||||
fakeClock := clock.NewFakeClock(time.Time{})
|
||||
c := dockertools.NewFakeDockerClientWithClock(fakeClock)
|
||||
c := dockertools.NewFakeDockerClient().WithClock(fakeClock)
|
||||
return &dockerService{client: c, os: &containertest.FakeOS{}, networkPlugin: &network.NoopNetworkPlugin{}}, c, fakeClock
|
||||
}
|
||||
|
||||
|
@ -53,8 +53,8 @@ func TestStatus(t *testing.T) {
|
|||
assert.Equal(t, len(expected), len(conditions))
|
||||
for k, v := range expected {
|
||||
for _, c := range conditions {
|
||||
if k == c.GetType() {
|
||||
assert.Equal(t, v, c.GetStatus())
|
||||
if k == c.Type {
|
||||
assert.Equal(t, v, c.Status)
|
||||
}
|
||||
}
|
||||
}
|
||||
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go
generated
vendored
@ -86,7 +86,7 @@ func (ds *dockerService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResp
|
|||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("exec")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetContainerId())
|
||||
_, err := checkContainerStatus(ds.client, req.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ func (ds *dockerService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.Atta
|
|||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("attach")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetContainerId())
|
||||
_, err := checkContainerStatus(ds.client, req.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func (ds *dockerService) PortForward(req *runtimeapi.PortForwardRequest) (*runti
|
|||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("port forward")
|
||||
}
|
||||
_, err := checkContainerStatus(ds.client, req.GetPodSandboxId())
|
||||
_, err := checkContainerStatus(ds.client, req.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go
generated
vendored
|
|||
// '<key>=<value>', which can be understood by docker.
|
||||
func generateEnvList(envs []*runtimeapi.KeyValue) (result []string) {
|
||||
for _, env := range envs {
|
||||
result = append(result, fmt.Sprintf("%s=%s", env.GetKey(), env.GetValue()))
|
||||
result = append(result, fmt.Sprintf("%s=%s", env.Key, env.Value))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -129,8 +129,8 @@ func extractLabels(input map[string]string) (map[string]string, map[string]strin
|
|||
// relabeling and the pod provides an SELinux label
|
||||
func generateMountBindings(mounts []*runtimeapi.Mount) (result []string) {
|
||||
for _, m := range mounts {
|
||||
bind := fmt.Sprintf("%s:%s", m.GetHostPath(), m.GetContainerPath())
|
||||
readOnly := m.GetReadonly()
|
||||
bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
|
||||
readOnly := m.Readonly
|
||||
if readOnly {
|
||||
bind += ":ro"
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ func generateMountBindings(mounts []*runtimeapi.Mount) (result []string) {
|
|||
// does not provide an SELinux context relabeling will label the volume with
|
||||
// the container's randomly allocated MCS label. This would restrict access
|
||||
// to the volume to the container which mounts it first.
|
||||
if m.GetSelinuxRelabel() {
|
||||
if m.SelinuxRelabel {
|
||||
if readOnly {
|
||||
bind += ",Z"
|
||||
} else {
|
||||
|
@ -154,16 +154,16 @@ func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]stru
|
|||
exposedPorts := map[dockernat.Port]struct{}{}
|
||||
portBindings := map[dockernat.Port][]dockernat.PortBinding{}
|
||||
for _, port := range pm {
|
||||
exteriorPort := port.GetHostPort()
|
||||
exteriorPort := port.HostPort
|
||||
if exteriorPort == 0 {
|
||||
// No need to do port binding when HostPort is not specified
|
||||
continue
|
||||
}
|
||||
interiorPort := port.GetContainerPort()
|
||||
interiorPort := port.ContainerPort
|
||||
// Some of this port stuff is under-documented voodoo.
|
||||
// See http://stackoverflow.com/questions/20428302/binding-a-port-to-a-host-interface-using-the-rest-api
|
||||
var protocol string
|
||||
switch strings.ToUpper(string(port.GetProtocol())) {
|
||||
switch strings.ToUpper(string(port.Protocol)) {
|
||||
case "UDP":
|
||||
protocol = "/udp"
|
||||
case "TCP":
|
||||
|
@ -178,7 +178,7 @@ func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]stru
|
|||
|
||||
hostBinding := dockernat.PortBinding{
|
||||
HostPort: strconv.Itoa(int(exteriorPort)),
|
||||
HostIP: port.GetHostIp(),
|
||||
HostIP: port.HostIp,
|
||||
}
|
||||
|
||||
// Allow multiple host ports bind to same docker port
|
||||
|
@ -272,20 +272,20 @@ func (f *dockerFilter) AddLabel(key, value string) {
|
|||
|
||||
// getUserFromImageUser gets uid or user name of the image user.
|
||||
// If user is numeric, it will be treated as uid; or else, it is treated as user name.
|
||||
func getUserFromImageUser(imageUser string) (*int64, *string) {
|
||||
func getUserFromImageUser(imageUser string) (*int64, string) {
|
||||
user := dockertools.GetUserFromImageUser(imageUser)
|
||||
// return both nil if user is not specified in the image.
|
||||
if user == "" {
|
||||
return nil, nil
|
||||
return nil, ""
|
||||
}
|
||||
// user could be either uid or user name. Try to interpret as numeric uid.
|
||||
uid, err := strconv.ParseInt(user, 10, 64)
|
||||
if err != nil {
|
||||
// If user is non numeric, assume it's user name.
|
||||
return nil, &user
|
||||
return nil, user
|
||||
}
|
||||
// If user is a numeric uid.
|
||||
return &uid, nil
|
||||
return &uid, ""
|
||||
}
|
||||
|
||||
// See #33189. If the previous attempt to create a sandbox container name FOO
|
||||
7
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go
generated
vendored
@ -192,11 +192,10 @@ func TestGetSystclsFromAnnotations(t *testing.T) {
|
|||
// TestGetUserFromImageUser tests the logic of getting image uid or user name of image user.
|
||||
func TestGetUserFromImageUser(t *testing.T) {
|
||||
newI64 := func(i int64) *int64 { return &i }
|
||||
newStr := func(s string) *string { return &s }
|
||||
for c, test := range map[string]struct {
|
||||
user string
|
||||
uid *int64
|
||||
name *string
|
||||
name string
|
||||
}{
|
||||
"no gid": {
|
||||
user: "0",
|
||||
|
@ -215,11 +214,11 @@ func TestGetUserFromImageUser(t *testing.T) {
|
|||
},
|
||||
"root username": {
|
||||
user: "root:root",
|
||||
name: newStr("root"),
|
||||
name: "root",
|
||||
},
|
||||
"username": {
|
||||
user: "test:test",
|
||||
name: newStr("test"),
|
||||
name: "test",
|
||||
},
|
||||
} {
|
||||
t.Logf("TestCase - %q", c)
|
||||
36
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming.go
generated
vendored
@ -57,23 +57,23 @@ const (
|
|||
|
||||
func makeSandboxName(s *runtimeapi.PodSandboxConfig) string {
|
||||
return strings.Join([]string{
|
||||
kubePrefix, // 0
|
||||
sandboxContainerName, // 1
|
||||
s.Metadata.GetName(), // 2
|
||||
s.Metadata.GetNamespace(), // 3
|
||||
s.Metadata.GetUid(), // 4
|
||||
fmt.Sprintf("%d", s.Metadata.GetAttempt()), // 5
|
||||
kubePrefix, // 0
|
||||
sandboxContainerName, // 1
|
||||
s.Metadata.Name, // 2
|
||||
s.Metadata.Namespace, // 3
|
||||
s.Metadata.Uid, // 4
|
||||
fmt.Sprintf("%d", s.Metadata.Attempt), // 5
|
||||
}, nameDelimiter)
|
||||
}
|
||||
|
||||
func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerConfig) string {
|
||||
return strings.Join([]string{
|
||||
kubePrefix, // 0
|
||||
c.Metadata.GetName(), // 1:
|
||||
s.Metadata.GetName(), // 2: sandbox name
|
||||
s.Metadata.GetNamespace(), // 3: sandbox namesapce
|
||||
s.Metadata.GetUid(), // 4 sandbox uid
|
||||
fmt.Sprintf("%d", c.Metadata.GetAttempt()), // 5
|
||||
kubePrefix, // 0
|
||||
c.Metadata.Name, // 1:
|
||||
s.Metadata.Name, // 2: sandbox name
|
||||
s.Metadata.Namespace, // 3: sandbox namesapce
|
||||
s.Metadata.Uid, // 4 sandbox uid
|
||||
fmt.Sprintf("%d", c.Metadata.Attempt), // 5
|
||||
}, nameDelimiter)
|
||||
|
||||
}
|
||||
|
@ -105,10 +105,10 @@ func parseSandboxName(name string) (*runtimeapi.PodSandboxMetadata, error) {
|
|||
}
|
||||
|
||||
return &runtimeapi.PodSandboxMetadata{
|
||||
Name: &parts[2],
|
||||
Namespace: &parts[3],
|
||||
Uid: &parts[4],
|
||||
Attempt: &attempt,
|
||||
Name: parts[2],
|
||||
Namespace: parts[3],
|
||||
Uid: parts[4],
|
||||
Attempt: attempt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ func parseContainerName(name string) (*runtimeapi.ContainerMetadata, error) {
|
|||
}
|
||||
|
||||
return &runtimeapi.ContainerMetadata{
|
||||
Name: &parts[1],
|
||||
Attempt: &attempt,
|
||||
Name: parts[1],
|
||||
Attempt: attempt,
|
||||
}, nil
|
||||
}
|
||||
4
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming_test.go
generated
vendored
@ -55,8 +55,8 @@ func TestContainerNameRoundTrip(t *testing.T) {
|
|||
name, attempt := "pause", uint32(5)
|
||||
config := &runtimeapi.ContainerConfig{
|
||||
Metadata: &runtimeapi.ContainerMetadata{
|
||||
Name: &name,
|
||||
Attempt: &attempt,
|
||||
Name: name,
|
||||
Attempt: attempt,
|
||||
},
|
||||
}
|
||||
actualName := makeContainerName(sConfig, config)
|
||||
28
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go
generated
vendored
@ -47,7 +47,7 @@ func NewDockerService(s dockershim.DockerService) DockerService {
|
|||
}
|
||||
|
||||
func (d *dockerService) Version(ctx context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
|
||||
return d.runtimeService.Version(r.GetVersion())
|
||||
return d.runtimeService.Version(r.Version)
|
||||
}
|
||||
|
||||
func (d *dockerService) Status(ctx context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
|
||||
|
@ -63,11 +63,11 @@ func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodS
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil
|
||||
return &runtimeapi.RunPodSandboxResponse{PodSandboxId: podSandboxId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
|
||||
err := d.runtimeService.StopPodSandbox(r.GetPodSandboxId())
|
||||
err := d.runtimeService.StopPodSandbox(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPo
|
|||
}
|
||||
|
||||
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
|
||||
err := d.runtimeService.RemovePodSandbox(r.GetPodSandboxId())
|
||||
err := d.runtimeService.RemovePodSandbox(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.Remo
|
|||
}
|
||||
|
||||
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
|
||||
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.GetPodSandboxId())
|
||||
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -99,15 +99,15 @@ func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeapi.ListPo
|
|||
}
|
||||
|
||||
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
|
||||
containerId, err := d.runtimeService.CreateContainer(r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig())
|
||||
containerId, err := d.runtimeService.CreateContainer(r.PodSandboxId, r.GetConfig(), r.GetSandboxConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.CreateContainerResponse{ContainerId: &containerId}, nil
|
||||
return &runtimeapi.CreateContainerResponse{ContainerId: containerId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
|
||||
err := d.runtimeService.StartContainer(r.GetContainerId())
|
||||
err := d.runtimeService.StartContainer(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartC
|
|||
}
|
||||
|
||||
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
|
||||
err := d.runtimeService.StopContainer(r.GetContainerId(), r.GetTimeout())
|
||||
err := d.runtimeService.StopContainer(r.ContainerId, r.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -123,7 +123,7 @@ func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopCon
|
|||
}
|
||||
|
||||
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
|
||||
err := d.runtimeService.RemoveContainer(r.GetContainerId())
|
||||
err := d.runtimeService.RemoveContainer(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ func (d *dockerService) ListContainers(ctx context.Context, r *runtimeapi.ListCo
|
|||
}
|
||||
|
||||
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
|
||||
status, err := d.runtimeService.ContainerStatus(r.GetContainerId())
|
||||
status, err := d.runtimeService.ContainerStatus(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.Conta
|
|||
}
|
||||
|
||||
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
|
||||
stdout, stderr, err := d.runtimeService.ExecSync(r.GetContainerId(), r.GetCmd(), time.Duration(r.GetTimeout())*time.Second)
|
||||
stdout, stderr, err := d.runtimeService.ExecSync(r.ContainerId, r.Cmd, time.Duration(r.Timeout)*time.Second)
|
||||
var exitCode int32
|
||||
if err != nil {
|
||||
exitError, ok := err.(utilexec.ExitError)
|
||||
|
@ -159,7 +159,7 @@ func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequ
|
|||
return &runtimeapi.ExecSyncResponse{
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ExitCode: &exitCode,
|
||||
ExitCode: exitCode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -204,7 +204,7 @@ func (d *dockerService) PullImage(ctx context.Context, r *runtimeapi.PullImageRe
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.PullImageResponse{ImageRef: &image}, nil
|
||||
return &runtimeapi.PullImageResponse{ImageRef: image}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
|
||||
38
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go
generated
vendored
@ -69,10 +69,10 @@ func modifyContainerConfig(sc *runtimeapi.LinuxContainerSecurityContext, config
|
|||
return
|
||||
}
|
||||
if sc.RunAsUser != nil {
|
||||
config.User = strconv.FormatInt(sc.GetRunAsUser(), 10)
|
||||
config.User = strconv.FormatInt(sc.GetRunAsUser().Value, 10)
|
||||
}
|
||||
if sc.RunAsUsername != nil {
|
||||
config.User = sc.GetRunAsUsername()
|
||||
if sc.RunAsUsername != "" {
|
||||
config.User = sc.RunAsUsername
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -88,24 +88,20 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *
|
|||
}
|
||||
|
||||
// Apply security context for the container.
|
||||
if sc.Privileged != nil {
|
||||
hostConfig.Privileged = sc.GetPrivileged()
|
||||
}
|
||||
if sc.ReadonlyRootfs != nil {
|
||||
hostConfig.ReadonlyRootfs = sc.GetReadonlyRootfs()
|
||||
}
|
||||
hostConfig.Privileged = sc.Privileged
|
||||
hostConfig.ReadonlyRootfs = sc.ReadonlyRootfs
|
||||
if sc.Capabilities != nil {
|
||||
hostConfig.CapAdd = sc.GetCapabilities().GetAddCapabilities()
|
||||
hostConfig.CapDrop = sc.GetCapabilities().GetDropCapabilities()
|
||||
hostConfig.CapAdd = sc.GetCapabilities().AddCapabilities
|
||||
hostConfig.CapDrop = sc.GetCapabilities().DropCapabilities
|
||||
}
|
||||
if sc.SelinuxOptions != nil {
|
||||
hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions(
|
||||
hostConfig.SecurityOpt,
|
||||
&v1.SELinuxOptions{
|
||||
User: sc.SelinuxOptions.GetUser(),
|
||||
Role: sc.SelinuxOptions.GetRole(),
|
||||
Type: sc.SelinuxOptions.GetType(),
|
||||
Level: sc.SelinuxOptions.GetLevel(),
|
||||
User: sc.SelinuxOptions.User,
|
||||
Role: sc.SelinuxOptions.Role,
|
||||
Type: sc.SelinuxOptions.Type,
|
||||
Level: sc.SelinuxOptions.Level,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
@ -114,22 +110,26 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *
|
|||
// modifySandboxNamespaceOptions apply namespace options for sandbox
|
||||
func modifySandboxNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig, networkPlugin network.NetworkPlugin) {
|
||||
modifyCommonNamespaceOptions(nsOpts, hostConfig)
|
||||
modifyHostNetworkOptionForSandbox(nsOpts.GetHostNetwork(), networkPlugin, hostConfig)
|
||||
modifyHostNetworkOptionForSandbox(nsOpts.HostNetwork, networkPlugin, hostConfig)
|
||||
}
|
||||
|
||||
// modifyContainerNamespaceOptions apply namespace options for container
|
||||
func modifyContainerNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, sandboxID string, hostConfig *dockercontainer.HostConfig) {
|
||||
hostNetwork := false
|
||||
if nsOpts != nil {
|
||||
hostNetwork = nsOpts.HostNetwork
|
||||
}
|
||||
modifyCommonNamespaceOptions(nsOpts, hostConfig)
|
||||
modifyHostNetworkOptionForContainer(nsOpts.GetHostNetwork(), sandboxID, hostConfig)
|
||||
modifyHostNetworkOptionForContainer(hostNetwork, sandboxID, hostConfig)
|
||||
}
|
||||
|
||||
// modifyCommonNamespaceOptions apply common namespace options for sandbox and container
|
||||
func modifyCommonNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig) {
|
||||
if nsOpts != nil {
|
||||
if nsOpts.GetHostPid() {
|
||||
if nsOpts.HostPid {
|
||||
hostConfig.PidMode = namespaceModeHost
|
||||
}
|
||||
if nsOpts.GetHostIpc() {
|
||||
if nsOpts.HostIpc {
|
||||
hostConfig.IpcMode = namespaceModeHost
|
||||
}
|
||||
}
|
||||
34
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
|||
|
||||
func TestModifyContainerConfig(t *testing.T) {
|
||||
var uid int64 = 123
|
||||
var username string = "testuser"
|
||||
var username = "testuser"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
|
@ -40,7 +40,7 @@ func TestModifyContainerConfig(t *testing.T) {
|
|||
{
|
||||
name: "container.SecurityContext.RunAsUser set",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
RunAsUser: &uid,
|
||||
RunAsUser: &runtimeapi.Int64Value{Value: uid},
|
||||
},
|
||||
expected: &dockercontainer.Config{
|
||||
User: strconv.FormatInt(uid, 10),
|
||||
|
@ -49,7 +49,7 @@ func TestModifyContainerConfig(t *testing.T) {
|
|||
{
|
||||
name: "container.SecurityContext.RunAsUsername set",
|
||||
sc: &runtimeapi.LinuxContainerSecurityContext{
|
||||
RunAsUsername: &username,
|
||||
RunAsUsername: username,
|
||||
},
|
||||
expected: &dockercontainer.Config{
|
||||
User: username,
|
||||
|
@ -70,10 +70,9 @@ func TestModifyContainerConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestModifyHostConfig(t *testing.T) {
|
||||
priv := true
|
||||
setNetworkHC := &dockercontainer.HostConfig{}
|
||||
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
|
||||
setPrivSC.Privileged = &priv
|
||||
setPrivSC.Privileged = true
|
||||
setPrivHC := &dockercontainer.HostConfig{
|
||||
Privileged: true,
|
||||
}
|
||||
|
@ -168,7 +167,7 @@ func TestModifyHostConfigAndNamespaceOptionsForContainer(t *testing.T) {
|
|||
sandboxID := "sandbox"
|
||||
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
|
||||
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
|
||||
setPrivSC.Privileged = &priv
|
||||
setPrivSC.Privileged = priv
|
||||
setPrivHC := &dockercontainer.HostConfig{
|
||||
Privileged: true,
|
||||
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
|
||||
|
@ -235,7 +234,7 @@ func TestModifySandboxNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostNetwork",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: &set,
|
||||
HostNetwork: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: namespaceModeHost,
|
||||
|
@ -244,7 +243,7 @@ func TestModifySandboxNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostIpc",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostIpc: &set,
|
||||
HostIpc: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
IpcMode: namespaceModeHost,
|
||||
|
@ -254,7 +253,7 @@ func TestModifySandboxNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostPid",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostPid: &set,
|
||||
HostPid: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
PidMode: namespaceModeHost,
|
||||
|
@ -281,7 +280,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostNetwork",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: &set,
|
||||
HostNetwork: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
|
@ -292,7 +291,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostIpc",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostIpc: &set,
|
||||
HostIpc: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
|
@ -302,7 +301,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
|||
{
|
||||
name: "NamespaceOption.HostPid",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostPid: &set,
|
||||
HostPid: set,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
|
@ -318,9 +317,8 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
|||
}
|
||||
|
||||
func fullValidSecurityContext() *runtimeapi.LinuxContainerSecurityContext {
|
||||
priv := true
|
||||
return &runtimeapi.LinuxContainerSecurityContext{
|
||||
Privileged: &priv,
|
||||
Privileged: true,
|
||||
Capabilities: inputCapabilities(),
|
||||
SelinuxOptions: inputSELinuxOptions(),
|
||||
}
|
||||
|
@ -340,10 +338,10 @@ func inputSELinuxOptions() *runtimeapi.SELinuxOption {
|
|||
level := "level"
|
||||
|
||||
return &runtimeapi.SELinuxOption{
|
||||
User: &user,
|
||||
Role: &role,
|
||||
Type: &stype,
|
||||
Level: &level,
|
||||
User: user,
|
||||
Role: role,
|
||||
Type: stype,
|
||||
Level: level,
|
||||
}
|
||||
}
|
||||
11
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/BUILD
generated
vendored
@ -47,15 +47,15 @@ go_library(
|
|||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/securitycontext:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/oom:go_default_library",
|
||||
"//pkg/util/procfs:go_default_library",
|
||||
"//pkg/util/selinux:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/util/tail:go_default_library",
|
||||
"//pkg/util/term:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor:github.com/armon/circbuf",
|
||||
"//vendor:github.com/docker/distribution/digest",
|
||||
"//vendor:github.com/docker/distribution/reference",
|
||||
"//vendor:github.com/docker/docker/pkg/jsonmessage",
|
||||
|
@ -76,6 +76,8 @@ go_library(
|
|||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/client-go/util/clock",
|
||||
"//vendor:k8s.io/client-go/util/flowcontrol",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -116,9 +118,7 @@ go_test(
|
|||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
|
@ -130,9 +130,12 @@ go_test(
|
|||
"//vendor:github.com/golang/mock/gomock",
|
||||
"//vendor:github.com/google/cadvisor/info/v1",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/client-go/util/clock",
|
||||
"//vendor:k8s.io/client-go/util/flowcontrol",
|
||||
],
|
||||
)
|
||||
5
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go
generated
vendored
@ -24,13 +24,14 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
func newTestContainerGC(t *testing.T) (*containerGC, *FakeDockerClient) {
|
||||
fakeDocker := new(FakeDockerClient)
|
||||
fakeDocker := NewFakeDockerClient()
|
||||
fakePodGetter := newFakePodGetter()
|
||||
gc := NewContainerGC(fakeDocker, fakePodGetter, "")
|
||||
return gc, fakeDocker
|
||||
|
@ -66,7 +67,7 @@ func addPods(podGetter podGetter, podUIDs ...types.UID) {
|
|||
fakePodGetter := podGetter.(*fakePodGetter)
|
||||
for _, uid := range podUIDs {
|
||||
fakePodGetter.pods[uid] = &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uid),
|
||||
Namespace: "test",
|
||||
UID: uid,
|
||||
89
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager.go
generated
vendored
@ -33,6 +33,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armon/circbuf"
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
dockerstrslice "github.com/docker/engine-api/types/strslice"
|
||||
|
@ -47,6 +48,7 @@ import (
|
|||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
|
@ -65,11 +67,11 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/oom"
|
||||
"k8s.io/kubernetes/pkg/util/procfs"
|
||||
"k8s.io/kubernetes/pkg/util/selinux"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/util/tail"
|
||||
"k8s.io/kubernetes/pkg/util/term"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
)
|
||||
|
@ -139,9 +141,6 @@ type DockerManager struct {
|
|||
// wrapped image puller.
|
||||
imagePuller images.ImageManager
|
||||
|
||||
// Root of the Docker runtime.
|
||||
dockerRoot string
|
||||
|
||||
// cgroup driver used by Docker runtime.
|
||||
cgroupDriver string
|
||||
|
||||
|
@ -238,10 +237,6 @@ func NewDockerManager(
|
|||
// Wrap the docker client with instrumentedDockerInterface
|
||||
client = NewInstrumentedDockerInterface(client)
|
||||
|
||||
// Work out the location of the Docker runtime, defaulting to /var/lib/docker
|
||||
// if there are any problems.
|
||||
dockerRoot := "/var/lib/docker"
|
||||
|
||||
// cgroup driver is only detectable in docker 1.11+
|
||||
// when the execution driver is not detectable, we provide the cgroupfs form.
|
||||
// if your docker engine is configured to use the systemd cgroup driver, and you
|
||||
|
@ -252,11 +247,7 @@ func NewDockerManager(
|
|||
dockerInfo, err := client.Info()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
|
||||
glog.Warningf("Using fallback default of /var/lib/docker for location of Docker runtime")
|
||||
} else {
|
||||
dockerRoot = dockerInfo.DockerRootDir
|
||||
glog.Infof("Setting dockerRoot to %s", dockerRoot)
|
||||
|
||||
cgroupDriver = dockerInfo.CgroupDriver
|
||||
glog.Infof("Setting cgroupDriver to %s", cgroupDriver)
|
||||
}
|
||||
|
@ -269,7 +260,6 @@ func NewDockerManager(
|
|||
machineInfo: machineInfo,
|
||||
podInfraContainerImage: podInfraContainerImage,
|
||||
dockerPuller: newDockerPuller(client),
|
||||
dockerRoot: dockerRoot,
|
||||
cgroupDriver: cgroupDriver,
|
||||
containerLogsDir: containerLogsDir,
|
||||
networkPlugin: networkPlugin,
|
||||
|
@ -482,19 +472,12 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
|
|||
startedAt = createdAt
|
||||
}
|
||||
|
||||
terminationMessagePath := containerInfo.TerminationMessagePath
|
||||
if terminationMessagePath != "" {
|
||||
for _, mount := range iResult.Mounts {
|
||||
if mount.Destination == terminationMessagePath {
|
||||
path := mount.Source
|
||||
if data, err := ioutil.ReadFile(path); err != nil {
|
||||
message = fmt.Sprintf("Error on reading termination-log %s: %v", path, err)
|
||||
} else {
|
||||
message = string(data)
|
||||
}
|
||||
}
|
||||
}
|
||||
// retrieve the termination message from logs, file, or file with fallback to logs in case of failure
|
||||
fallbackToLogs := containerInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && (iResult.State.ExitCode != 0 || iResult.State.OOMKilled)
|
||||
if msg := getTerminationMessage(dm.c, iResult, containerInfo.TerminationMessagePath, fallbackToLogs); len(msg) > 0 {
|
||||
message = msg
|
||||
}
|
||||
|
||||
status.State = kubecontainer.ContainerStateExited
|
||||
status.Message = message
|
||||
status.Reason = reason
|
||||
|
@ -508,6 +491,49 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
|
|||
return &status, "", nil
|
||||
}
|
||||
|
||||
func getTerminationMessage(c DockerInterface, iResult *dockertypes.ContainerJSON, terminationMessagePath string, fallbackToLogs bool) string {
|
||||
if len(terminationMessagePath) != 0 {
|
||||
for _, mount := range iResult.Mounts {
|
||||
if mount.Destination != terminationMessagePath {
|
||||
continue
|
||||
}
|
||||
path := mount.Source
|
||||
data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error on reading termination log %s: %v", path, err)
|
||||
}
|
||||
if !fallbackToLogs || len(data) != 0 {
|
||||
return string(data)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !fallbackToLogs {
|
||||
return ""
|
||||
}
|
||||
|
||||
return readLastStringFromContainerLogs(c, iResult.Name)
|
||||
}
|
||||
|
||||
// readLastStringFromContainerLogs attempts to a certain amount from the end of the logs for containerName.
|
||||
// It will attempt to avoid reading excessive logs from the server, which may result in underestimating the amount
|
||||
// of logs to fetch (such that the length of the response message is < max).
|
||||
func readLastStringFromContainerLogs(c DockerInterface, containerName string) string {
|
||||
logOptions := dockertypes.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
}
|
||||
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
|
||||
streamOptions := StreamOptions{
|
||||
ErrorStream: buf,
|
||||
OutputStream: buf,
|
||||
}
|
||||
logOptions.Tail = strconv.FormatInt(kubecontainer.MaxContainerTerminationMessageLogLines, 10)
|
||||
if err := c.Logs(containerName, logOptions, streamOptions); err != nil {
|
||||
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// makeEnvList converts EnvVar list to a list of strings, in the form of
|
||||
// '<key>=<value>', which can be understood by docker.
|
||||
func makeEnvList(envs []kubecontainer.EnvVar) (result []string) {
|
||||
|
@ -672,17 +698,24 @@ func (dm *DockerManager) runContainer(
|
|||
fs, err := os.Create(containerLogPath)
|
||||
if err != nil {
|
||||
// TODO: Clean up the previously created dir? return the error?
|
||||
glog.Errorf("Error on creating termination-log file %q: %v", containerLogPath, err)
|
||||
utilruntime.HandleError(fmt.Errorf("error creating termination-log file %q: %v", containerLogPath, err))
|
||||
} else {
|
||||
fs.Close() // Close immediately; we're just doing a `touch` here
|
||||
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
|
||||
|
||||
// Chmod is needed because ioutil.WriteFile() ends up calling
|
||||
// open(2) to create the file, so the final mode used is "mode &
|
||||
// ~umask". But we want to make sure the specified mode is used
|
||||
// in the file no matter what the umask is.
|
||||
if err := os.Chmod(containerLogPath, 0666); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to set termination-log file permissions %q: %v", containerLogPath, err))
|
||||
}
|
||||
|
||||
// Have docker relabel the termination log path if SELinux is
|
||||
// enabled.
|
||||
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
|
||||
if selinux.SELinuxEnabled() {
|
||||
b += ":Z"
|
||||
}
|
||||
|
||||
binds = append(binds, b)
|
||||
}
|
||||
}
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_linux_test.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
|||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
|
@ -282,7 +283,7 @@ func TestCreateAppArmorContanier(t *testing.T) {
|
|||
dm.recorder = recorder
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -417,7 +418,7 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -435,7 +436,7 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
|
|||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
27
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package dockertools
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
@ -36,9 +37,12 @@ import (
|
|||
"github.com/golang/mock/gomock"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -52,12 +56,25 @@ import (
|
|||
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
|
||||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
uexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
var testTempDir string
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
dir, err := ioutil.TempDir("", "dockertools")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
testTempDir = dir
|
||||
|
||||
flag.Parse()
|
||||
status := m.Run()
|
||||
os.RemoveAll(testTempDir)
|
||||
os.Exit(status)
|
||||
}
|
||||
|
||||
type fakeHTTP struct {
|
||||
url string
|
||||
err error
|
||||
|
@ -80,7 +97,7 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *
|
|||
var opts kubecontainer.RunContainerOptions
|
||||
var err error
|
||||
if len(container.TerminationMessagePath) != 0 {
|
||||
testPodContainerDir, err = ioutil.TempDir("", "fooPodContainerDir")
|
||||
testPodContainerDir, err = ioutil.TempDir(testTempDir, "fooPodContainerDir")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -164,7 +181,7 @@ func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManage
|
|||
}
|
||||
|
||||
func newTestDockerManagerWithVersion(version, apiVersion string) (*DockerManager, *FakeDockerClient) {
|
||||
fakeDocker := NewFakeDockerClientWithVersion(version, apiVersion)
|
||||
fakeDocker := NewFakeDockerClient().WithVersion(version, apiVersion)
|
||||
return createTestDockerManagerWithFakeImageManager(nil, fakeDocker)
|
||||
}
|
||||
|
||||
|
@ -1918,7 +1935,7 @@ func makePod(name string, spec *v1.PodSpec) *v1.Pod {
|
|||
spec = &v1.PodSpec{Containers: []v1.Container{{Name: "foo"}, {Name: "bar"}}}
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: name,
|
||||
Namespace: "new",
9
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/exec.go
generated
vendored
@ -53,6 +53,7 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
|
|||
args = append(args, container.Config.Env...)
|
||||
args = append(args, cmd...)
|
||||
command := exec.Command(nsenter, args...)
|
||||
var cmdErr error
|
||||
if tty {
|
||||
p, err := kubecontainer.StartPty(command)
|
||||
if err != nil {
|
||||
|
@ -75,7 +76,7 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
|
|||
go io.Copy(stdout, p)
|
||||
}
|
||||
|
||||
err = command.Wait()
|
||||
cmdErr = command.Wait()
|
||||
} else {
|
||||
if stdin != nil {
|
||||
// Use an os.Pipe here as it returns true *os.File objects.
|
||||
|
@ -97,13 +98,13 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
|
|||
command.Stderr = stderr
|
||||
}
|
||||
|
||||
err = command.Run()
|
||||
cmdErr = command.Run()
|
||||
}
|
||||
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
if exitErr, ok := cmdErr.(*exec.ExitError); ok {
|
||||
return &utilexec.ExitErrorWrapper{ExitError: exitErr}
|
||||
}
|
||||
return err
|
||||
return cmdErr
|
||||
}
|
||||
|
||||
// NativeExecHandler executes commands in Docker containers using Docker's exec API.
|
||||
132
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_docker_client.go
generated
vendored
@ -28,7 +28,7 @@ import (
|
|||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
"k8s.io/client-go/util/clock"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
@ -55,8 +55,9 @@ type FakeDockerClient struct {
|
|||
Errors map[string]error
|
||||
called []calledDetail
|
||||
pulled []string
|
||||
EnableTrace bool
|
||||
|
||||
// Created, Stopped and Removed all container docker ID
|
||||
// Created, Started, Stopped and Removed all contain container docker ID
|
||||
Created []string
|
||||
Started []string
|
||||
Stopped []string
|
||||
|
@ -74,25 +75,64 @@ type FakeDockerClient struct {
|
|||
const fakeDockerVersion = "1.8.1"
|
||||
|
||||
func NewFakeDockerClient() *FakeDockerClient {
|
||||
return NewFakeDockerClientWithVersion(fakeDockerVersion, minimumDockerAPIVersion)
|
||||
}
|
||||
|
||||
func NewFakeDockerClientWithClock(c clock.Clock) *FakeDockerClient {
|
||||
return newClientWithVersionAndClock(fakeDockerVersion, minimumDockerAPIVersion, c)
|
||||
}
|
||||
|
||||
func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient {
|
||||
return newClientWithVersionAndClock(version, apiVersion, clock.RealClock{})
|
||||
}
|
||||
|
||||
func newClientWithVersionAndClock(version, apiVersion string, c clock.Clock) *FakeDockerClient {
|
||||
return &FakeDockerClient{
|
||||
VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion},
|
||||
VersionInfo: dockertypes.Version{Version: fakeDockerVersion, APIVersion: minimumDockerAPIVersion},
|
||||
Errors: make(map[string]error),
|
||||
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
|
||||
Clock: c,
|
||||
Clock: clock.RealClock{},
|
||||
// default this to an empty result, so that we never have a nil non-error response from InspectImage
|
||||
Image: &dockertypes.ImageInspect{},
|
||||
// default this to true, so that we trace calls, image pulls and container lifecycle
|
||||
EnableTrace: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) WithClock(c clock.Clock) *FakeDockerClient {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.Clock = c
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) WithVersion(version, apiVersion string) *FakeDockerClient {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.VersionInfo = dockertypes.Version{Version: version, APIVersion: apiVersion}
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) WithTraceDisabled() *FakeDockerClient {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.EnableTrace = false
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) appendCalled(callDetail calledDetail) {
|
||||
if f.EnableTrace {
|
||||
f.called = append(f.called, callDetail)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) appendPulled(pull string) {
|
||||
if f.EnableTrace {
|
||||
f.pulled = append(f.pulled, pull)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) appendContainerTrace(traceCategory string, containerName string) {
|
||||
if !f.EnableTrace {
|
||||
return
|
||||
}
|
||||
switch traceCategory {
|
||||
case "Created":
|
||||
f.Created = append(f.Created, containerName)
|
||||
case "Started":
|
||||
f.Started = append(f.Started, containerName)
|
||||
case "Stopped":
|
||||
f.Stopped = append(f.Stopped, containerName)
|
||||
case "Removed":
|
||||
f.Removed = append(f.Removed, containerName)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -120,9 +160,10 @@ func (f *FakeDockerClient) ClearCalls() {
|
|||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = []calledDetail{}
|
||||
f.Stopped = []string{}
|
||||
f.pulled = []string{}
|
||||
f.Created = []string{}
|
||||
f.Started = []string{}
|
||||
f.Stopped = []string{}
|
||||
f.Removed = []string{}
|
||||
}
|
||||
|
||||
|
@ -270,6 +311,17 @@ func (f *FakeDockerClient) AssertStopped(stopped []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertRemoved(removed []string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
sort.StringSlice(removed).Sort()
|
||||
sort.StringSlice(f.Removed).Sort()
|
||||
if !reflect.DeepEqual(removed, f.Removed) {
|
||||
return fmt.Errorf("expected %#v, got %#v", removed, f.Removed)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) popError(op string) error {
|
||||
if f.Errors == nil {
|
||||
return nil
|
||||
|
@ -288,7 +340,7 @@ func (f *FakeDockerClient) popError(op string) error {
|
|||
func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "list"})
|
||||
f.appendCalled(calledDetail{name: "list"})
|
||||
err := f.popError("list")
|
||||
containerList := append([]dockertypes.Container{}, f.RunningContainerList...)
|
||||
if options.All {
|
||||
|
@ -305,7 +357,7 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio
|
|||
func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_container"})
|
||||
f.appendCalled(calledDetail{name: "inspect_container"})
|
||||
err := f.popError("inspect_container")
|
||||
if container, ok := f.ContainerMap[id]; ok {
|
||||
return container, err
|
||||
|
@ -322,7 +374,7 @@ func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS
|
|||
func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_image"})
|
||||
f.appendCalled(calledDetail{name: "inspect_image"})
|
||||
err := f.popError("inspect_image")
|
||||
return f.Image, err
|
||||
}
|
||||
|
@ -332,7 +384,7 @@ func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageIns
|
|||
func (f *FakeDockerClient) InspectImageByID(name string) (*dockertypes.ImageInspect, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_image"})
|
||||
f.appendCalled(calledDetail{name: "inspect_image"})
|
||||
err := f.popError("inspect_image")
|
||||
return f.Image, err
|
||||
}
|
||||
|
@ -356,7 +408,7 @@ func (f *FakeDockerClient) normalSleep(mean, stdDev, cutOffMillis int) {
|
|||
func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "create"})
|
||||
f.appendCalled(calledDetail{name: "create"})
|
||||
if err := f.popError("create"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -364,7 +416,7 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig)
|
|||
// Docker likes to add a '/', so copy that behavior.
|
||||
name := "/" + c.Name
|
||||
id := name
|
||||
f.Created = append(f.Created, name)
|
||||
f.appendContainerTrace("Created", name)
|
||||
// The newest container should be in front, because we assume so in GetPodStatus()
|
||||
f.RunningContainerList = append([]dockertypes.Container{
|
||||
{ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels},
|
||||
|
@ -380,11 +432,11 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig)
|
|||
func (f *FakeDockerClient) StartContainer(id string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "start"})
|
||||
f.appendCalled(calledDetail{name: "start"})
|
||||
if err := f.popError("start"); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Started = append(f.Started, id)
|
||||
f.appendContainerTrace("Started", id)
|
||||
container, ok := f.ContainerMap[id]
|
||||
if !ok {
|
||||
container = convertFakeContainer(&FakeContainer{ID: id, Name: id})
|
||||
|
@ -404,11 +456,11 @@ func (f *FakeDockerClient) StartContainer(id string) error {
|
|||
func (f *FakeDockerClient) StopContainer(id string, timeout int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "stop"})
|
||||
f.appendCalled(calledDetail{name: "stop"})
|
||||
if err := f.popError("stop"); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Stopped = append(f.Stopped, id)
|
||||
f.appendContainerTrace("Stopped", id)
|
||||
// Container status should be Updated before container moved to ExitedContainerList
|
||||
f.updateContainerStatus(id, statusExitedPrefix)
|
||||
var newList []dockertypes.Container
|
||||
|
@ -442,7 +494,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout int) error {
|
|||
func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "remove"})
|
||||
f.appendCalled(calledDetail{name: "remove"})
|
||||
err := f.popError("remove")
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -451,7 +503,7 @@ func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.Container
|
|||
if f.ExitedContainerList[i].ID == id {
|
||||
delete(f.ContainerMap, id)
|
||||
f.ExitedContainerList = append(f.ExitedContainerList[:i], f.ExitedContainerList[i+1:]...)
|
||||
f.Removed = append(f.Removed, id)
|
||||
f.appendContainerTrace("Removed", id)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -465,7 +517,7 @@ func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.Container
|
|||
func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "logs"})
|
||||
f.appendCalled(calledDetail{name: "logs"})
|
||||
return f.popError("logs")
|
||||
}
|
||||
|
||||
|
@ -474,7 +526,7 @@ func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions
|
|||
func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "pull"})
|
||||
f.appendCalled(calledDetail{name: "pull"})
|
||||
err := f.popError("pull")
|
||||
if err == nil {
|
||||
authJson, _ := json.Marshal(auth)
|
||||
|
@ -482,7 +534,7 @@ func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig,
|
|||
ID: image,
|
||||
RepoTags: []string{image},
|
||||
}
|
||||
f.pulled = append(f.pulled, fmt.Sprintf("%s using %s", image, string(authJson)))
|
||||
f.appendPulled(fmt.Sprintf("%s using %s", image, string(authJson)))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -501,21 +553,21 @@ func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*
|
|||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.execCmd = opts.Cmd
|
||||
f.called = append(f.called, calledDetail{name: "create_exec"})
|
||||
f.appendCalled(calledDetail{name: "create_exec"})
|
||||
return &dockertypes.ContainerExecCreateResponse{ID: "12345678"}, nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "start_exec"})
|
||||
f.appendCalled(calledDetail{name: "start_exec"})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "attach"})
|
||||
f.appendCalled(calledDetail{name: "attach"})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -524,13 +576,13 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns
|
|||
}
|
||||
|
||||
func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
|
||||
f.called = append(f.called, calledDetail{name: "list_images"})
|
||||
f.appendCalled(calledDetail{name: "list_images"})
|
||||
err := f.popError("list_images")
|
||||
return f.Images, err
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
|
||||
f.called = append(f.called, calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
|
||||
f.appendCalled(calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
|
||||
err := f.popError("remove_image")
|
||||
if err == nil {
|
||||
for i := range f.Images {
|
||||
|
@ -560,14 +612,14 @@ func (f *FakeDockerClient) updateContainerStatus(id, status string) {
|
|||
func (f *FakeDockerClient) ResizeExecTTY(id string, height, width int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "resize_exec"})
|
||||
f.appendCalled(calledDetail{name: "resize_exec"})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "resize_container"})
|
||||
f.appendCalled(calledDetail{name: "resize_container"})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -612,7 +664,7 @@ func (f *FakeDockerPuller) GetImageRef(name string) (string, error) {
|
|||
func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "image_history"})
|
||||
f.appendCalled(calledDetail{name: "image_history"})
|
||||
history := f.ImageHistoryMap[id]
|
||||
return history, nil
|
||||
}
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_manager.go
generated
vendored
|
@ -19,6 +19,7 @@ package dockertools
|
|||
import (
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
|
@ -26,7 +27,6 @@ import (
|
|||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/cache"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/oom"
|
||||
"k8s.io/kubernetes/pkg/util/procfs"
|
||||
)
|
||||
|
|
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/images_test.go
generated
vendored
|
@ -24,7 +24,7 @@ import (
|
|||
)
|
||||
|
||||
func TestImageStatsNoImages(t *testing.T) {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
|
||||
isp := newImageStatsProvider(fakeDockerClient)
|
||||
st, err := isp.ImageStats()
|
||||
as := assert.New(t)
|
||||
|
@ -34,7 +34,7 @@ func TestImageStatsNoImages(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestImageStatsWithImages(t *testing.T) {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
|
||||
fakeHistoryData := map[string][]dockertypes.ImageHistory{
|
||||
"busybox": {
|
||||
{
|
||||
|
@ -317,7 +317,7 @@ func TestImageStatsWithCachedImages(t *testing.T) {
|
|||
expectedTotalStorageSize: 600,
|
||||
},
|
||||
} {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
|
||||
fakeDockerClient.InjectImages(test.images)
|
||||
fakeDockerClient.InjectImageHistory(test.history)
|
||||
isp := newImageStatsProvider(fakeDockerClient)
|
||||
|
|
16
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/labels.go
generated
vendored
|
@ -39,11 +39,12 @@ const (
|
|||
kubernetesPodDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
|
||||
kubernetesPodTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
|
||||
|
||||
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
|
||||
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
|
||||
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
|
||||
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
|
||||
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
|
||||
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
|
||||
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
|
||||
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
|
||||
kubernetesContainerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
|
||||
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
|
||||
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
|
||||
|
||||
// TODO(random-liu): Keep this for old containers, remove this when we drop support for v1.1.
|
||||
kubernetesPodLabel = "io.kubernetes.pod.data"
|
||||
|
@ -63,6 +64,7 @@ type labelledContainerInfo struct {
|
|||
Hash string
|
||||
RestartCount int
|
||||
TerminationMessagePath string
|
||||
TerminationMessagePolicy v1.TerminationMessagePolicy
|
||||
PreStopHandler *v1.Handler
|
||||
Ports []v1.ContainerPort
|
||||
}
|
||||
|
@ -83,6 +85,7 @@ func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCus
|
|||
labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
|
||||
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
|
||||
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
|
||||
labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)
|
||||
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
|
||||
// Using json encoding so that the PreStop handler object is readable after writing as a label
|
||||
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
|
||||
|
@ -118,7 +121,8 @@ func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo
|
|||
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
|
||||
Name: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
|
||||
Hash: getStringValueFromLabel(labels, kubernetesContainerHashLabel),
|
||||
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
|
||||
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
|
||||
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePolicyLabel)),
|
||||
}
|
||||
if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
|
||||
logError(containerInfo, kubernetesContainerRestartCountLabel, err)
|
||||
|
|
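The labels.go hunk above adds a terminationMessagePolicy label next to the existing hash, restartCount and terminationMessagePath keys. A small, self-contained sketch of the write/read round trip those labels go through; the key strings mirror the constants in the diff but are declared locally here for illustration only.

package main

import (
	"fmt"
	"strconv"
)

// Key strings copied from the label constants above, for illustration only.
const (
	restartCountKey             = "io.kubernetes.container.restartCount"
	terminationMessagePolicyKey = "io.kubernetes.container.terminationMessagePolicy"
)

func main() {
	// Writing: the kubelet serializes container metadata into the docker labels map.
	labels := map[string]string{
		restartCountKey:             strconv.Itoa(3),
		terminationMessagePolicyKey: "FallbackToLogsOnError",
	}

	// Reading: recover the typed values, as getContainerInfoFromLabel does above.
	restartCount, err := strconv.Atoi(labels[restartCountKey])
	if err != nil {
		restartCount = 0
	}
	fmt.Println(restartCount, labels[terminationMessagePolicyKey])
}
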
3
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/labels_test.go
generated
vendored
|
@ -21,6 +21,7 @@ import (
|
|||
"strconv"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -71,7 +72,7 @@ func TestLabels(t *testing.T) {
|
|||
Lifecycle: lifecycle,
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test_pod",
|
||||
Namespace: "test_pod_namespace",
|
||||
UID: "test_pod_uid",
|
||||
|
|
1
vendor/k8s.io/kubernetes/pkg/kubelet/envvars/BUILD
generated
vendored
|
@ -25,6 +25,7 @@ go_test(
|
|||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/kubelet/envvars:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
11
vendor/k8s.io/kubernetes/pkg/kubelet/envvars/envvars_test.go
generated
vendored
|
@ -20,6 +20,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/envvars"
|
||||
)
|
||||
|
@ -27,7 +28,7 @@ import (
|
|||
func TestFromServices(t *testing.T) {
|
||||
sl := []*v1.Service{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "foo-bar"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo-bar"},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{"bar": "baz"},
|
||||
ClusterIP: "1.2.3.4",
|
||||
|
@ -37,7 +38,7 @@ func TestFromServices(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "abc-123"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "abc-123"},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{"bar": "baz"},
|
||||
ClusterIP: "5.6.7.8",
|
||||
|
@ -48,7 +49,7 @@ func TestFromServices(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "q-u-u-x"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "q-u-u-x"},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{"bar": "baz"},
|
||||
ClusterIP: "9.8.7.6",
|
||||
|
@ -59,7 +60,7 @@ func TestFromServices(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-none"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "svrc-clusterip-none"},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{"bar": "baz"},
|
||||
ClusterIP: "None",
|
||||
|
@ -69,7 +70,7 @@ func TestFromServices(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-empty"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "svrc-clusterip-empty"},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{"bar": "baz"},
|
||||
ClusterIP: "",
|
||||
|
|
4
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD
generated
vendored
|
@ -33,11 +33,11 @@ go_library(
|
|||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/quota/evaluator/core:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
"//vendor:k8s.io/client-go/util/clock",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -58,9 +58,9 @@ go_test(
|
|||
"//pkg/kubelet/lifecycle:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/quota:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/client-go/util/clock",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/OWNERS
generated
vendored
|
@ -1,4 +1,4 @@
|
|||
assignees:
|
||||
- derekwaynecarr
|
||||
- vishh
|
||||
- dchen1107
|
||||
approvers:
|
||||
- derekwaynecarr
|
||||
- vishh
|
||||
- dchen1107
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go
generated
vendored
|
@ -24,6 +24,7 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
|
@ -34,7 +35,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
)
|
||||
|
||||
// managerImpl implements Manager
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager_test.go
generated
vendored
|
@ -21,13 +21,13 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
)
|
||||
|
||||
// mockPodKiller is used to testing which pod is killed
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go
generated
vendored
|
@ -1573,7 +1573,7 @@ func newVolume(name string, volumeSource v1.VolumeSource) v1.Volume {
|
|||
// newPod uses the name as the uid. Make names unique for testing.
|
||||
func newPod(name string, containers []v1.Container, volumes []v1.Volume) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
UID: types.UID(name),
|
||||
},
|
||||
|
|
7
vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD
generated
vendored
|
@ -25,13 +25,13 @@ go_library(
|
|||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/parsers:go_default_library",
|
||||
"//vendor:github.com/docker/distribution/reference",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
"//vendor:k8s.io/client-go/util/flowcontrol",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -49,11 +49,12 @@ go_test(
|
|||
"//pkg/kubelet/cadvisor/testing:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//vendor:github.com/google/cadvisor/info/v2",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:github.com/stretchr/testify/require",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/client-go/util/clock",
|
||||
"//vendor:k8s.io/client-go/util/flowcontrol",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go
generated
vendored
|
@ -19,9 +19,9 @@ package images
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
)
|
||||
|
||||
// throttleImagePulling wraps kubecontainer.ImageService to throttle image
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager_test.go
generated
vendored
|
@ -24,11 +24,11 @@ import (
|
|||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
)
|
||||
|
||||
var zero time.Time
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go
generated
vendored
|
@ -21,11 +21,11 @@ import (
|
|||
|
||||
dockerref "github.com/docker/distribution/reference"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/parsers"
|
||||
)
|
||||
|
||||
|
|
9
vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager_test.go
generated
vendored
|
@ -22,12 +22,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
. "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
)
|
||||
|
||||
type pullerTestCase struct {
|
||||
|
@ -111,7 +112,7 @@ func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fake
|
|||
|
||||
func TestParallelPuller(t *testing.T) {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test_pod",
|
||||
Namespace: "test-ns",
|
||||
UID: "bar",
|
||||
|
@ -135,7 +136,7 @@ func TestParallelPuller(t *testing.T) {
|
|||
|
||||
func TestSerializedPuller(t *testing.T) {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test_pod",
|
||||
Namespace: "test-ns",
|
||||
UID: "bar",
|
||||
|
|
55
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
generated
vendored
|
@ -34,11 +34,16 @@ import (
|
|||
clientgoclientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/client-go/util/integer"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -46,9 +51,9 @@ import (
|
|||
componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/legacylisters"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
|
@ -70,6 +75,7 @@ import (
|
|||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
"k8s.io/kubernetes/pkg/kubelet/remote"
|
||||
"k8s.io/kubernetes/pkg/kubelet/rkt"
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
||||
|
@ -82,12 +88,9 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/volumemanager"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/pkg/util/bandwidth"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
utilconfig "k8s.io/kubernetes/pkg/util/config"
|
||||
utildbus "k8s.io/kubernetes/pkg/util/dbus"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/integer"
|
||||
kubeio "k8s.io/kubernetes/pkg/util/io"
|
||||
utilipt "k8s.io/kubernetes/pkg/util/iptables"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
|
@ -379,18 +382,18 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
|
||||
serviceStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
|
||||
if kubeClient != nil {
|
||||
serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", v1.NamespaceAll, fields.Everything())
|
||||
serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything())
|
||||
cache.NewReflector(serviceLW, &v1.Service{}, serviceStore, 0).Run()
|
||||
}
|
||||
serviceLister := &cache.StoreToServiceLister{Indexer: serviceStore}
|
||||
serviceLister := &listers.StoreToServiceLister{Indexer: serviceStore}
|
||||
|
||||
nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
if kubeClient != nil {
|
||||
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
|
||||
nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fieldSelector)
|
||||
nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
|
||||
cache.NewReflector(nodeLW, &v1.Node{}, nodeStore, 0).Run()
|
||||
}
|
||||
nodeLister := &cache.StoreToNodeLister{Store: nodeStore}
|
||||
nodeLister := &listers.StoreToNodeLister{Store: nodeStore}
|
||||
nodeInfo := &predicates.CachedNodeInfo{StoreToNodeLister: nodeLister}
|
||||
|
||||
// TODO: get the real node object of ourself,
|
||||
|
@ -409,6 +412,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
}
|
||||
containerRefManager := kubecontainer.NewRefManager()
|
||||
|
||||
secretManager, err := secret.NewSimpleSecretManager(kubeClient)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize secret manager: %v", err)
|
||||
}
|
||||
|
||||
oomWatcher := NewOOMWatcher(kubeDeps.CAdvisorInterface, kubeDeps.Recorder)
|
||||
|
||||
klet := &Kubelet{
|
||||
|
@ -434,6 +442,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
recorder: kubeDeps.Recorder,
|
||||
cadvisor: kubeDeps.CAdvisorInterface,
|
||||
diskSpaceManager: diskSpaceManager,
|
||||
secretManager: secretManager,
|
||||
cloud: kubeDeps.Cloud,
|
||||
autoDetectCloudProvider: (componentconfigv1alpha1.AutoDetectCloudProvider == kubeCfg.CloudProvider),
|
||||
nodeRef: nodeRef,
|
||||
|
@ -498,7 +507,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
klet.livenessManager = proberesults.NewManager()
|
||||
|
||||
klet.podCache = kubecontainer.NewCache()
|
||||
klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient))
|
||||
// podManager is also responsible for keeping secretManager contents up-to-date.
|
||||
klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager)
|
||||
|
||||
if kubeCfg.RemoteRuntimeEndpoint != "" {
|
||||
// kubeCfg.RemoteImageEndpoint is same as kubeCfg.RemoteRuntimeEndpoint if not explicitly specified
|
||||
|
@ -543,7 +553,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
|
||||
// Use the new CRI shim for docker.
|
||||
ds, err := dockershim.NewDockerService(klet.dockerClient, kubeCfg.SeccompProfileRoot, kubeCfg.PodInfraContainerImage,
|
||||
streamingConfig, &pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver)
|
||||
streamingConfig, &pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver, dockerExecHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -718,7 +728,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
kubeDeps.Recorder)
|
||||
|
||||
klet.volumePluginMgr, err =
|
||||
NewInitializedVolumePluginMgr(klet, kubeDeps.VolumePlugins)
|
||||
NewInitializedVolumePluginMgr(klet, secretManager, kubeDeps.VolumePlugins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -739,7 +749,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
|
|||
kubeDeps.Mounter,
|
||||
klet.getPodsDir(),
|
||||
kubeDeps.Recorder,
|
||||
kubeCfg.ExperimentalCheckNodeCapabilitiesBeforeMount)
|
||||
kubeCfg.ExperimentalCheckNodeCapabilitiesBeforeMount,
|
||||
kubeCfg.KeepTerminatedPodVolumes)
|
||||
|
||||
runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
|
||||
if err != nil {
|
||||
|
@ -913,6 +924,9 @@ type Kubelet struct {
|
|||
// Diskspace manager.
|
||||
diskSpaceManager diskSpaceManager
|
||||
|
||||
// Secret manager.
|
||||
secretManager secret.Manager
|
||||
|
||||
// Cached MachineInfo returned by cadvisor.
|
||||
machineInfo *cadvisorapi.MachineInfo
|
||||
|
||||
|
@ -1243,6 +1257,11 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
|
|||
// handled by pod workers).
|
||||
go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
|
||||
|
||||
// Start a goroutine responsible for checking limits in resolv.conf
|
||||
if kl.resolverConfig != "" {
|
||||
go wait.Until(func() { kl.checkLimitsForResolvConf() }, 30*time.Second, wait.NeverStop)
|
||||
}
|
||||
|
||||
// Start component sync loops.
|
||||
kl.statusManager.Start()
|
||||
kl.probeManager.Start()
|
||||
|
@ -1299,6 +1318,8 @@ func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
|
|||
if kl.resolverConfig == "" {
|
||||
hostDNS = []string{"127.0.0.1"}
|
||||
hostSearch = []string{"."}
|
||||
} else {
|
||||
hostSearch = kl.formDNSSearchForDNSDefault(hostSearch, pod)
|
||||
}
|
||||
return hostDNS, hostSearch, nil
|
||||
}
|
||||
|
@ -1307,15 +1328,7 @@ func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
|
|||
// the pod. The cluster DNS server itself will forward queries to other nameservers that it is configured to use,
|
||||
// in case the cluster DNS server cannot resolve the DNS query itself
|
||||
dns := []string{kl.clusterDNS.String()}
|
||||
|
||||
var dnsSearch []string
|
||||
if kl.clusterDomain != "" {
|
||||
nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, kl.clusterDomain)
|
||||
svcDomain := fmt.Sprintf("svc.%s", kl.clusterDomain)
|
||||
dnsSearch = append([]string{nsSvcDomain, svcDomain, kl.clusterDomain}, hostSearch...)
|
||||
} else {
|
||||
dnsSearch = hostSearch
|
||||
}
|
||||
dnsSearch := kl.formDNSSearch(hostSearch, pod)
|
||||
return dns, dnsSearch, nil
|
||||
}
|
||||
|
||||
|
|
9
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor_test.go
generated
vendored
|
@ -36,6 +36,7 @@ func TestGetContainerInfo(t *testing.T) {
|
|||
}
|
||||
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
kubelet := testKubelet.kubelet
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
|
@ -72,6 +73,7 @@ func TestGetRawContainerInfoRoot(t *testing.T) {
|
|||
},
|
||||
}
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
|
@ -99,6 +101,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
|
|||
},
|
||||
}
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
|
@ -117,6 +120,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
|
|||
func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
|
||||
containerID := "ab2cdf"
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
|
@ -152,6 +156,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
|
|||
|
||||
func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
|
@ -166,6 +171,7 @@ func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
|
|||
|
||||
func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
|
@ -187,6 +193,7 @@ func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
|
|||
|
||||
func TestGetContainerInfoWithNoContainers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
|
||||
|
@ -205,6 +212,7 @@ func TestGetContainerInfoWithNoContainers(t *testing.T) {
|
|||
|
||||
func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
|
@ -253,6 +261,7 @@ func TestHasDedicatedImageFs(t *testing.T) {
|
|||
}
|
||||
for testName, testCase := range testCases {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
mockCadvisor.On("Start").Return(nil)
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters_test.go
generated
vendored
|
@ -25,6 +25,7 @@ import (
|
|||
|
||||
func TestKubeletDirs(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
root := kubelet.rootDirectory
|
||||
|
||||
|
@ -87,6 +88,7 @@ func TestKubeletDirs(t *testing.T) {
|
|||
|
||||
func TestKubeletDirsCompat(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
root := kubelet.rootDirectory
|
||||
if err := os.MkdirAll(root, 0750); err != nil {
|
||||
|
|
115
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go
generated
vendored
|
@ -20,6 +20,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
@ -89,6 +90,120 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
|
|||
return supported
|
||||
}
|
||||
|
||||
func omitDuplicates(kl *Kubelet, pod *v1.Pod, combinedSearch []string) []string {
|
||||
uniqueDomains := map[string]bool{}
|
||||
|
||||
for _, dnsDomain := range combinedSearch {
|
||||
if _, exists := uniqueDomains[dnsDomain]; !exists {
|
||||
combinedSearch[len(uniqueDomains)] = dnsDomain
|
||||
uniqueDomains[dnsDomain] = true
|
||||
} else {
|
||||
log := fmt.Sprintf("Found and omitted duplicated dns domain in host search line: '%s' during merging with cluster dns domains", dnsDomain)
|
||||
kl.recorder.Event(pod, v1.EventTypeWarning, "DNSSearchForming", log)
|
||||
glog.Error(log)
|
||||
}
|
||||
}
|
||||
return combinedSearch[:len(uniqueDomains)]
|
||||
}
|
||||
|
||||
func formDNSSearchFitsLimits(kl *Kubelet, pod *v1.Pod, composedSearch []string) []string {
	// resolver file Search line current limitations
	resolvSearchLineDNSDomainsLimit := 6
	resolvSearchLineLenLimit := 255
	limitsExceeded := false

	if len(composedSearch) > resolvSearchLineDNSDomainsLimit {
		composedSearch = composedSearch[:resolvSearchLineDNSDomainsLimit]
		limitsExceeded = true
	}

	if resolvSearchLineStrLen := len(strings.Join(composedSearch, " ")); resolvSearchLineStrLen > resolvSearchLineLenLimit {
		cutDomainsNum := 0
		cutDomainsLen := 0
		for i := len(composedSearch) - 1; i >= 0; i-- {
			cutDomainsLen += len(composedSearch[i]) + 1
			cutDomainsNum++

			if (resolvSearchLineStrLen - cutDomainsLen) <= resolvSearchLineLenLimit {
				break
			}
		}

		composedSearch = composedSearch[:(len(composedSearch) - cutDomainsNum)]
		limitsExceeded = true
	}

	if limitsExceeded {
		log := fmt.Sprintf("Search Line limits were exceeded, some dns names have been omitted, the applied search line is: %s", strings.Join(composedSearch, " "))
		kl.recorder.Event(pod, v1.EventTypeWarning, "DNSSearchForming", log)
		glog.Error(log)
	}
	return composedSearch
}

func (kl *Kubelet) formDNSSearchForDNSDefault(hostSearch []string, pod *v1.Pod) []string {
	return formDNSSearchFitsLimits(kl, pod, hostSearch)
}

func (kl *Kubelet) formDNSSearch(hostSearch []string, pod *v1.Pod) []string {
	if kl.clusterDomain == "" {
		formDNSSearchFitsLimits(kl, pod, hostSearch)
		return hostSearch
	}

	nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, kl.clusterDomain)
	svcDomain := fmt.Sprintf("svc.%s", kl.clusterDomain)
	dnsSearch := []string{nsSvcDomain, svcDomain, kl.clusterDomain}

	combinedSearch := append(dnsSearch, hostSearch...)

	combinedSearch = omitDuplicates(kl, pod, combinedSearch)
	return formDNSSearchFitsLimits(kl, pod, combinedSearch)
}
|
||||
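omitDuplicates and formDNSSearchFitsLimits above implement the resolv.conf search-line constraints (at most 6 domains and 255 characters), trimming from the end of the list when the limits are exceeded. A standalone, simplified sketch of the same policy on plain string slices; the domain names are illustrative and this is not the vendored code.

package main

import (
	"fmt"
	"strings"
)

const (
	maxSearchDomains   = 6   // resolv.conf domain-count limit mirrored from the diff
	maxSearchLineChars = 255 // resolv.conf line-length limit mirrored from the diff
)

// fitSearchLine drops duplicate domains, then trims the list to the
// domain-count and line-length limits, cutting from the end as the
// kubelet code above does.
func fitSearchLine(domains []string) []string {
	seen := map[string]bool{}
	deduped := make([]string, 0, len(domains))
	for _, d := range domains {
		if !seen[d] {
			seen[d] = true
			deduped = append(deduped, d)
		}
	}
	if len(deduped) > maxSearchDomains {
		deduped = deduped[:maxSearchDomains]
	}
	for len(deduped) > 0 && len(strings.Join(deduped, " ")) > maxSearchLineChars {
		deduped = deduped[:len(deduped)-1]
	}
	return deduped
}

func main() {
	search := []string{
		"ns.svc.cluster.local", "svc.cluster.local", "cluster.local",
		"cluster.local", "corp.example.com", "a.example.com", "b.example.com",
	}
	fmt.Println(strings.Join(fitSearchLine(search), " "))
}
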
|
||||
func (kl *Kubelet) checkLimitsForResolvConf() {
|
||||
// resolver file Search line current limitations
|
||||
resolvSearchLineDNSDomainsLimit := 6
|
||||
resolvSearchLineLenLimit := 255
|
||||
|
||||
f, err := os.Open(kl.resolverConfig)
|
||||
if err != nil {
|
||||
kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", err.Error())
|
||||
glog.Error("checkLimitsForResolvConf: " + err.Error())
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, hostSearch, err := kl.parseResolvConf(f)
|
||||
if err != nil {
|
||||
kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", err.Error())
|
||||
glog.Error("checkLimitsForResolvConf: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
domainCntLimit := resolvSearchLineDNSDomainsLimit
|
||||
|
||||
if kl.clusterDomain != "" {
|
||||
domainCntLimit -= 3
|
||||
}
|
||||
|
||||
if len(hostSearch) > domainCntLimit {
|
||||
log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", kl.resolverConfig, domainCntLimit)
|
||||
kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", log)
|
||||
glog.Error("checkLimitsForResolvConf: " + log)
|
||||
return
|
||||
}
|
||||
|
||||
if len(strings.Join(hostSearch, " ")) > resolvSearchLineLenLimit {
|
||||
log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", kl.resolverConfig, resolvSearchLineLenLimit)
|
||||
kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", log)
|
||||
glog.Error("checkLimitsForResolvConf: " + log)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseResolvConf reads a resolv.conf file from the given reader, and parses
|
||||
// it into nameservers and searches, possibly returning an error.
|
||||
// TODO: move to utility package
|
||||
|
|
83
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_test.go
generated
vendored
|
@ -17,17 +17,20 @@ limitations under the License.
|
|||
package kubelet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/util/bandwidth"
|
||||
)
|
||||
|
||||
func TestNodeIPParam(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
tests := []struct {
|
||||
nodeIP string
|
||||
|
@ -96,6 +99,7 @@ func TestParseResolvConf(t *testing.T) {
|
|||
{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
|
||||
}
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
for i, tc := range testCases {
|
||||
ns, srch, err := kubelet.parseResolvConf(strings.NewReader(tc.data))
|
||||
|
@ -112,6 +116,84 @@ func TestParseResolvConf(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestComposeDNSSearch(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
recorder := record.NewFakeRecorder(20)
|
||||
kubelet.recorder = recorder
|
||||
|
||||
pod := podWithUidNameNs("", "test_pod", "testNS")
|
||||
kubelet.clusterDomain = "TEST"
|
||||
|
||||
testCases := []struct {
|
||||
dnsNames []string
|
||||
hostNames []string
|
||||
resultSearch []string
|
||||
events []string
|
||||
}{
|
||||
{
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST"},
|
||||
[]string{},
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST"},
|
||||
[]string{},
|
||||
},
|
||||
|
||||
{
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST"},
|
||||
[]string{"AAA", "svc.TEST", "BBB", "TEST"},
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST", "AAA", "BBB"},
|
||||
[]string{
|
||||
"Found and omitted duplicated dns domain in host search line: 'svc.TEST' during merging with cluster dns domains",
|
||||
"Found and omitted duplicated dns domain in host search line: 'TEST' during merging with cluster dns domains",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST"},
|
||||
[]string{"AAA", strings.Repeat("B", 256), "BBB"},
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST", "AAA"},
|
||||
[]string{"Search Line limits were exceeded, some dns names have been omitted, the applied search line is: testNS.svc.TEST svc.TEST TEST AAA"},
|
||||
},
|
||||
|
||||
{
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST"},
|
||||
[]string{"AAA", "TEST", "BBB", "TEST", "CCC", "DDD"},
|
||||
[]string{"testNS.svc.TEST", "svc.TEST", "TEST", "AAA", "BBB", "CCC"},
|
||||
[]string{
|
||||
"Found and omitted duplicated dns domain in host search line: 'TEST' during merging with cluster dns domains",
|
||||
"Found and omitted duplicated dns domain in host search line: 'TEST' during merging with cluster dns domains",
|
||||
"Search Line limits were exceeded, some dns names have been omitted, the applied search line is: testNS.svc.TEST svc.TEST TEST AAA BBB CCC",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fetchEvent := func(recorder *record.FakeRecorder) string {
|
||||
select {
|
||||
case event := <-recorder.Events:
|
||||
return event
|
||||
default:
|
||||
return "No more events!"
|
||||
}
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
dnsSearch := kubelet.formDNSSearch(tc.hostNames, pod)
|
||||
|
||||
if !reflect.DeepEqual(dnsSearch, tc.resultSearch) {
|
||||
t.Errorf("[%d] expected search line %#v, got %#v", i, tc.resultSearch, dnsSearch)
|
||||
}
|
||||
|
||||
for _, expectedEvent := range tc.events {
|
||||
expected := fmt.Sprintf("%s %s %s", v1.EventTypeWarning, "DNSSearchForming", expectedEvent)
|
||||
event := fetchEvent(recorder)
|
||||
if event != expected {
|
||||
t.Errorf("[%d] expected event '%s', got '%s'", i, expected, event)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
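The fetchEvent helper above shows the usual pattern for asserting recorded events with record.FakeRecorder: events land on a buffered channel and the test drains it without blocking. A minimal sketch of that pattern outside the kubelet test fixture; the event reason and message are examples, and passing a nil object relies on FakeRecorder ignoring it when formatting.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/record"
)

func main() {
	// Buffered fake recorder; 20 matches the capacity used in the test above.
	recorder := record.NewFakeRecorder(20)
	recorder.Event(nil, "Warning", "DNSSearchForming", "search line was truncated")

	// Non-blocking drain, as in fetchEvent: fall through when the channel is empty.
	select {
	case event := <-recorder.Events:
		fmt.Println(event)
	default:
		fmt.Println("No more events!")
	}
}
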
|
||||
func TestCleanupBandwidthLimits(t *testing.T) {
|
||||
testPod := func(name, ingress string) *v1.Pod {
|
||||
pod := podWithUidNameNs("", name, "")
|
||||
|
@ -178,6 +260,7 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||
}
|
||||
|
||||
testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKube.Cleanup()
|
||||
testKube.kubelet.shaper = shaper
|
||||
|
||||
for _, pod := range test.pods {
|
||||
|
|
12
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go
generated
vendored
|
@ -149,7 +149,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
|
|||
if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
|
||||
glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err)
|
||||
} else {
|
||||
glog.Info("Deleted old node object %q", kl.nodeName)
|
||||
glog.Infof("Deleted old node object %q", kl.nodeName)
|
||||
}
|
||||
|
||||
return false
|
||||
|
@ -189,7 +189,7 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v
|
|||
// labels, information from the cloud provider, and Kubelet configuration.
|
||||
func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: string(kl.nodeName),
|
||||
Labels: map[string]string{
|
||||
metav1.LabelHostname: kl.hostname,
|
||||
|
@ -204,7 +204,13 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
}
|
||||
if len(kl.kubeletConfiguration.RegisterWithTaints) > 0 {
|
||||
annotations := make(map[string]string)
|
||||
b, err := json.Marshal(kl.kubeletConfiguration.RegisterWithTaints)
|
||||
taints := make([]v1.Taint, len(kl.kubeletConfiguration.RegisterWithTaints))
|
||||
for i := range kl.kubeletConfiguration.RegisterWithTaints {
|
||||
if err := v1.Convert_api_Taint_To_v1_Taint(&kl.kubeletConfiguration.RegisterWithTaints[i], &taints[i], nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
b, err := json.Marshal(taints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
28
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go
generated
vendored
|
@ -114,9 +114,10 @@ func TestUpdateNewNodeStatus(t *testing.T) {
|
|||
inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
|
||||
testKubelet := newTestKubeletWithImageList(
|
||||
t, inputImageList, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
existingNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}
|
||||
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
|
||||
machineInfo := &cadvisorapi.MachineInfo{
|
||||
MachineID: "123",
|
||||
|
@ -140,7 +141,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
expectedNode := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
|
@ -253,9 +254,10 @@ func TestUpdateNewNodeStatus(t *testing.T) {
|
|||
|
||||
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
existingNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}
|
||||
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
|
||||
machineInfo := &cadvisorapi.MachineInfo{
|
||||
MachineID: "123",
|
||||
|
@ -328,10 +330,11 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
|
|||
|
||||
func TestUpdateExistingNodeStatus(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
existingNode := v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
|
@ -403,7 +406,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
expectedNode := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
|
@ -523,6 +526,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
|
|||
|
||||
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
clock := testKubelet.fakeClock
|
||||
// Do not set nano second, because apiserver function doesn't support nano second. (Only support
|
||||
|
@ -530,7 +534,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
|
|||
clock.SetTime(time.Unix(123456, 0))
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
existingNode := v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
|
@ -681,10 +685,11 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
|
|||
|
||||
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
clock := testKubelet.fakeClock
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
existingNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}
|
||||
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
mockCadvisor.On("Start").Return(nil)
|
||||
|
@ -708,7 +713,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
|
|||
}
|
||||
|
||||
expectedNode := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
|
@ -900,6 +905,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
|
|||
|
||||
func TestUpdateNodeStatusError(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
// No matching node for the kubelet
|
||||
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
|
||||
|
@ -914,6 +920,7 @@ func TestUpdateNodeStatusError(t *testing.T) {
|
|||
|
||||
func TestRegisterWithApiServer(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
|
@ -925,7 +932,7 @@ func TestRegisterWithApiServer(t *testing.T) {
|
|||
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// Return an existing (matching) node on get.
|
||||
return true, &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: v1.NodeSpec{ExternalID: testKubeletHostname},
|
||||
}, nil
|
||||
})
|
||||
|
@ -981,7 +988,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
|
|||
|
||||
newNode := func(cmad bool, externalID string) *v1.Node {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: v1.NodeSpec{
|
||||
ExternalID: externalID,
|
||||
},
|
||||
|
@ -1094,6 +1101,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
|
|||
|
||||
for _, tc := range cases {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
|
||||
|
|
106
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go
generated
vendored
|
@ -33,6 +33,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -418,26 +419,34 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
|
||||
var (
|
||||
configMaps = make(map[string]*v1.ConfigMap)
|
||||
secrets = make(map[string]*v1.Secret)
|
||||
tmpEnv = make(map[string]string)
|
||||
)
|
||||
|
||||
// Env will override EnvFrom variables.
|
||||
// Process EnvFrom first then allow Env to replace existing values.
|
||||
for _, envFrom := range container.EnvFrom {
|
||||
if envFrom.ConfigMapRef != nil {
|
||||
name := envFrom.ConfigMapRef.Name
|
||||
switch {
|
||||
case envFrom.ConfigMapRef != nil:
|
||||
cm := envFrom.ConfigMapRef
|
||||
name := cm.Name
|
||||
configMap, ok := configMaps[name]
|
||||
if !ok {
|
||||
if kl.kubeClient == nil {
|
||||
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
|
||||
}
|
||||
optional := cm.Optional != nil && *cm.Optional
|
||||
configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) && optional {
|
||||
// ignore error when marked optional
|
||||
continue
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
configMaps[name] = configMap
|
||||
}
|
||||
|
||||
for k, v := range configMap.Data {
|
||||
if len(envFrom.Prefix) > 0 {
|
||||
k = envFrom.Prefix + k
|
||||
|
@ -445,14 +454,37 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
if errMsgs := utilvalidation.IsCIdentifier(k); len(errMsgs) != 0 {
|
||||
return result, fmt.Errorf("Invalid environment variable name, %v, from configmap %v/%v: %s", k, pod.Namespace, name, errMsgs[0])
|
||||
}
|
||||
// Accesses apiserver+Pods.
|
||||
// So, the master may set service env vars, or kubelet may. In case both are doing
|
||||
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
|
||||
// env vars.
|
||||
// TODO: remove this next line once all platforms use apiserver+Pods.
|
||||
delete(serviceEnv, k)
|
||||
tmpEnv[k] = v
|
||||
}
|
||||
case envFrom.SecretRef != nil:
|
||||
s := envFrom.SecretRef
|
||||
name := s.Name
|
||||
secret, ok := secrets[name]
|
||||
if !ok {
|
||||
if kl.kubeClient == nil {
|
||||
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
|
||||
}
|
||||
optional := s.Optional != nil && *s.Optional
|
||||
secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) && optional {
|
||||
// ignore error when marked optional
|
||||
continue
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
secrets[name] = secret
|
||||
}
|
||||
|
||||
for k, v := range secret.Data {
|
||||
if len(envFrom.Prefix) > 0 {
|
||||
k = envFrom.Prefix + k
|
||||
}
|
||||
if errMsgs := utilvalidation.IsCIdentifier(k); len(errMsgs) != 0 {
|
||||
return result, fmt.Errorf("Invalid environment variable name, %v, from secret %v/%v: %s", k, pod.Namespace, name, errMsgs[0])
|
||||
}
|
||||
tmpEnv[k] = string(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -466,17 +498,9 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
// 2. Create the container's environment in the order variables are declared
|
||||
// 3. Add remaining service environment vars
|
||||
var (
|
||||
secrets = make(map[string]*v1.Secret)
|
||||
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
|
||||
)
|
||||
for _, envVar := range container.Env {
|
||||
// Accesses apiserver+Pods.
|
||||
// So, the master may set service env vars, or kubelet may. In case both are doing
|
||||
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
|
||||
// env vars.
|
||||
// TODO: remove this next line once all platforms use apiserver+Pods.
|
||||
delete(serviceEnv, envVar.Name)
|
||||
|
||||
runtimeVal := envVar.Value
|
||||
if runtimeVal != "" {
|
||||
// Step 1a: expand variable references
|
||||
|
@ -499,8 +523,10 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
return result, err
|
||||
}
|
||||
case envVar.ValueFrom.ConfigMapKeyRef != nil:
|
||||
name := envVar.ValueFrom.ConfigMapKeyRef.Name
|
||||
key := envVar.ValueFrom.ConfigMapKeyRef.Key
|
||||
cm := envVar.ValueFrom.ConfigMapKeyRef
|
||||
name := cm.Name
|
||||
key := cm.Key
|
||||
optional := cm.Optional != nil && *cm.Optional
|
||||
configMap, ok := configMaps[name]
|
||||
if !ok {
|
||||
if kl.kubeClient == nil {
|
||||
|
@ -508,35 +534,57 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
}
|
||||
configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) && optional {
|
||||
// ignore error when marked optional
|
||||
continue
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
configMaps[name] = configMap
|
||||
}
|
||||
runtimeVal, ok = configMap.Data[key]
|
||||
if !ok {
|
||||
if optional {
|
||||
continue
|
||||
}
|
||||
return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
|
||||
}
|
||||
case envVar.ValueFrom.SecretKeyRef != nil:
|
||||
name := envVar.ValueFrom.SecretKeyRef.Name
|
||||
key := envVar.ValueFrom.SecretKeyRef.Key
|
||||
s := envVar.ValueFrom.SecretKeyRef
|
||||
name := s.Name
|
||||
key := s.Key
|
||||
optional := s.Optional != nil && *s.Optional
|
||||
secret, ok := secrets[name]
|
||||
if !ok {
|
||||
if kl.kubeClient == nil {
|
||||
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
|
||||
}
|
||||
secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name, metav1.GetOptions{})
|
||||
secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) && optional {
|
||||
// ignore error when marked optional
|
||||
continue
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
secrets[name] = secret
|
||||
}
|
||||
runtimeValBytes, ok := secret.Data[key]
|
||||
if !ok {
|
||||
if optional {
|
||||
continue
|
||||
}
|
||||
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
|
||||
}
|
||||
runtimeVal = string(runtimeValBytes)
|
||||
}
|
||||
}
|
||||
// Accesses apiserver+Pods.
|
||||
// So, the master may set service env vars, or kubelet may. In case both are doing
|
||||
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
|
||||
// env vars.
|
||||
// TODO: remove this next line once all platforms use apiserver+Pods.
|
||||
delete(serviceEnv, envVar.Name)
|
||||
|
||||
tmpEnv[envVar.Name] = runtimeVal
|
||||
}
|
||||
|
@ -548,7 +596,14 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
|
||||
// Append remaining service env vars.
|
||||
for k, v := range serviceEnv {
|
||||
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
|
||||
// Accesses apiserver+Pods.
|
||||
// So, the master may set service env vars, or kubelet may. In case both are doing
|
||||
// it, we skip the key from the kubelet-generated ones so we don't have duplicate
|
||||
// env vars.
|
||||
// TODO: remove this next line once all platforms use apiserver+Pods.
|
||||
if _, present := tmpEnv[k]; !present {
|
||||
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
@ -638,14 +693,11 @@ func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
|
|||
|
||||
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
|
||||
// secrets.
|
||||
// TODO: duplicate secrets are being retrieved multiple times and there
|
||||
// is no cache. Creating and using a secret manager interface will make this
|
||||
// easier to address.
|
||||
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
|
||||
pullSecrets := []v1.Secret{}
|
||||
|
||||
for _, secretRef := range pod.Spec.ImagePullSecrets {
|
||||
secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name, metav1.GetOptions{})
|
||||
secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
|
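Note on the change above: makeEnvironmentVariables now walks EnvFrom sources before Env, honors the new Optional flag on ConfigMap and Secret references, and reads secrets through the kubelet's secretManager rather than a direct API call. Below is a minimal, self-contained sketch of the optional-reference pattern only; the types and the getSecret helper are simplified stand-ins for illustration, not kubelet APIs.

// Sketch of the Optional-reference handling introduced above.
// envFromSource and getSecret are simplified stand-ins, not kubelet types.
package main

import (
	"errors"
	"fmt"
)

// envFromSource mirrors the shape of v1.EnvFromSource for this sketch.
type envFromSource struct {
	Name     string
	Optional *bool
}

var errNotFound = errors.New("not found")

// getSecret stands in for secretManager.GetSecret.
func getSecret(name string) (map[string][]byte, error) {
	store := map[string]map[string][]byte{
		"present-secret": {"REPLACE_ME": []byte("FROM_SECRET")},
	}
	if data, ok := store[name]; ok {
		return data, nil
	}
	return nil, errNotFound
}

// resolve collects env vars, skipping sources that are missing but marked optional.
func resolve(sources []envFromSource) (map[string]string, error) {
	env := map[string]string{}
	for _, src := range sources {
		optional := src.Optional != nil && *src.Optional
		data, err := getSecret(src.Name)
		if err != nil {
			if errors.Is(err, errNotFound) && optional {
				continue // missing but optional: ignore, as the kubelet code does
			}
			return nil, err
		}
		for k, v := range data {
			env[k] = string(v)
		}
	}
	return env, nil
}

func main() {
	optional := true
	env, err := resolve([]envFromSource{
		{Name: "present-secret"},
		{Name: "missing-secret", Optional: &optional},
	})
	fmt.Println(env, err)
}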
|
334
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods_test.go
generated
vendored
|
@ -26,6 +26,8 @@ import (
|
|||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -113,6 +115,7 @@ func TestMakeMounts(t *testing.T) {
|
|||
|
||||
func TestRunInContainerNoSuchPod(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
fakeRuntime.PodList = []*containertest.FakePod{}
|
||||
|
@ -121,7 +124,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
|
|||
podNamespace := "nsFoo"
|
||||
containerName := "containerFoo"
|
||||
output, err := kubelet.RunInContainer(
|
||||
kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: v1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
"",
|
||||
containerName,
|
||||
[]string{"ls"})
|
||||
|
@ -132,6 +135,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
|
|||
func TestRunInContainer(t *testing.T) {
|
||||
for _, testError := range []error{nil, errors.New("bar")} {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
fakeCommandRunner := containertest.FakeContainerCommandRunner{
|
||||
|
@ -165,6 +169,7 @@ func TestRunInContainer(t *testing.T) {
|
|||
|
||||
func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
clusterNS := "203.0.113.1"
|
||||
|
@ -210,7 +215,11 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
|
|||
} else if options[0].DNS[0] != clusterNS {
|
||||
t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
|
||||
}
|
||||
if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
|
||||
expLength := len(options[1].DNSSearch) + 3
|
||||
if expLength > 6 {
|
||||
expLength = 6
|
||||
}
|
||||
if len(options[0].DNSSearch) != expLength {
|
||||
t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
|
||||
} else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
|
||||
t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
|
||||
|
@ -237,7 +246,7 @@ func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
|
|||
|
||||
func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{Name: name, Namespace: namespace},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Protocol: v1.Protocol(protocol),
|
||||
|
@ -249,8 +258,9 @@ func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Ser
|
|||
}
|
||||
|
||||
func TestMakeEnvironmentVariables(t *testing.T) {
|
||||
trueVal := true
|
||||
services := []*v1.Service{
|
||||
buildService("kubernetes", v1.NamespaceDefault, "1.2.3.1", "TCP", 8081),
|
||||
buildService("kubernetes", metav1.NamespaceDefault, "1.2.3.1", "TCP", 8081),
|
||||
buildService("test", "test1", "1.2.3.3", "TCP", 8083),
|
||||
buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
|
||||
buildService("test", "test2", "1.2.3.5", "TCP", 8085),
|
||||
|
@ -269,6 +279,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
masterServiceNs string // the namespace to read master service info from
|
||||
nilLister bool // whether the lister should be nil
|
||||
configMap *v1.ConfigMap // an optional ConfigMap to pull from
|
||||
secret *v1.Secret // an optional Secret to pull from
|
||||
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
|
||||
expectedError bool // does the test fail
|
||||
}{
|
||||
|
@ -287,7 +298,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
||||
},
|
||||
},
|
||||
masterServiceNs: v1.NamespaceDefault,
|
||||
masterServiceNs: metav1.NamespaceDefault,
|
||||
nilLister: false,
|
||||
expectedEnvs: []kubecontainer.EnvVar{
|
||||
{Name: "FOO", Value: "BAR"},
|
||||
|
@ -322,7 +333,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
||||
},
|
||||
},
|
||||
masterServiceNs: v1.NamespaceDefault,
|
||||
masterServiceNs: metav1.NamespaceDefault,
|
||||
nilLister: true,
|
||||
expectedEnvs: []kubecontainer.EnvVar{
|
||||
{Name: "FOO", Value: "BAR"},
|
||||
|
@ -343,7 +354,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
{Name: "FOO", Value: "BAZ"},
|
||||
},
|
||||
},
|
||||
masterServiceNs: v1.NamespaceDefault,
|
||||
masterServiceNs: metav1.NamespaceDefault,
|
||||
nilLister: false,
|
||||
expectedEnvs: []kubecontainer.EnvVar{
|
||||
{Name: "FOO", Value: "BAZ"},
|
||||
|
@ -607,6 +618,106 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "configmapkeyref_missing_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"},
|
||||
Key: "key",
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "configmapkeyref_missing_key_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"},
|
||||
Key: "key",
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
nilLister: true,
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-configmap",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"a": "b",
|
||||
},
|
||||
},
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "secretkeyref_missing_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
SecretKeyRef: &v1.SecretKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
|
||||
Key: "key",
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "secretkeyref_missing_key_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
SecretKeyRef: &v1.SecretKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"},
|
||||
Key: "key",
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
nilLister: true,
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"a": []byte("b"),
|
||||
},
|
||||
},
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "configmap",
|
||||
ns: "test1",
|
||||
|
@ -638,7 +749,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
masterServiceNs: "nothing",
|
||||
nilLister: false,
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-configmap",
|
||||
},
|
||||
|
@ -713,6 +824,19 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
masterServiceNs: "nothing",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "configmap_missing_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{ConfigMapRef: &v1.ConfigMapEnvSource{
|
||||
Optional: &trueVal,
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"}}},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "configmap_invalid_keys",
|
||||
ns: "test1",
|
||||
|
@ -723,7 +847,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
},
|
||||
masterServiceNs: "nothing",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-configmap",
|
||||
},
|
||||
|
@ -746,7 +870,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
},
|
||||
masterServiceNs: "",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-configmap",
|
||||
},
|
||||
|
@ -761,10 +885,178 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "secret",
|
||||
ns: "test1",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
|
||||
},
|
||||
{
|
||||
Prefix: "p_",
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
|
||||
},
|
||||
},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "TEST_LITERAL",
|
||||
Value: "test-test-test",
|
||||
},
|
||||
{
|
||||
Name: "EXPANSION_TEST",
|
||||
Value: "$(REPLACE_ME)",
|
||||
},
|
||||
{
|
||||
Name: "DUPE_TEST",
|
||||
Value: "ENV_VAR",
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
nilLister: false,
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"REPLACE_ME": []byte("FROM_SECRET"),
|
||||
"DUPE_TEST": []byte("SECRET"),
|
||||
},
|
||||
},
|
||||
expectedEnvs: []kubecontainer.EnvVar{
|
||||
{
|
||||
Name: "TEST_LITERAL",
|
||||
Value: "test-test-test",
|
||||
},
|
||||
{
|
||||
Name: "TEST_SERVICE_HOST",
|
||||
Value: "1.2.3.3",
|
||||
},
|
||||
{
|
||||
Name: "TEST_SERVICE_PORT",
|
||||
Value: "8083",
|
||||
},
|
||||
{
|
||||
Name: "TEST_PORT",
|
||||
Value: "tcp://1.2.3.3:8083",
|
||||
},
|
||||
{
|
||||
Name: "TEST_PORT_8083_TCP",
|
||||
Value: "tcp://1.2.3.3:8083",
|
||||
},
|
||||
{
|
||||
Name: "TEST_PORT_8083_TCP_PROTO",
|
||||
Value: "tcp",
|
||||
},
|
||||
{
|
||||
Name: "TEST_PORT_8083_TCP_PORT",
|
||||
Value: "8083",
|
||||
},
|
||||
{
|
||||
Name: "TEST_PORT_8083_TCP_ADDR",
|
||||
Value: "1.2.3.3",
|
||||
},
|
||||
{
|
||||
Name: "REPLACE_ME",
|
||||
Value: "FROM_SECRET",
|
||||
},
|
||||
{
|
||||
Name: "EXPANSION_TEST",
|
||||
Value: "FROM_SECRET",
|
||||
},
|
||||
{
|
||||
Name: "DUPE_TEST",
|
||||
Value: "ENV_VAR",
|
||||
},
|
||||
{
|
||||
Name: "p_REPLACE_ME",
|
||||
Value: "FROM_SECRET",
|
||||
},
|
||||
{
|
||||
Name: "p_DUPE_TEST",
|
||||
Value: "SECRET",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "secret_missing",
|
||||
ns: "test1",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "secret_missing_optional",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{SecretRef: &v1.SecretEnvSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
|
||||
Optional: &trueVal}},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
expectedEnvs: nil,
|
||||
},
|
||||
{
|
||||
name: "secret_invalid_keys",
|
||||
ns: "test1",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "nothing",
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"1234": []byte("abc"),
|
||||
},
|
||||
},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "secret_invalid_keys_valid",
|
||||
ns: "test",
|
||||
container: &v1.Container{
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
Prefix: "p_",
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
masterServiceNs: "",
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test1",
|
||||
Name: "test-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"1234": []byte("abc"),
|
||||
},
|
||||
},
|
||||
expectedEnvs: []kubecontainer.EnvVar{
|
||||
{
|
||||
Name: "p_1234",
|
||||
Value: "abc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
kl.masterServiceNamespace = tc.masterServiceNs
|
||||
if tc.nilLister {
|
||||
|
@ -776,13 +1068,28 @@ func TestMakeEnvironmentVariables(t *testing.T) {
|
|||
testKubelet.fakeKubeClient.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
|
||||
var err error
|
||||
if tc.configMap == nil {
|
||||
err = errors.New("no configmap defined")
|
||||
err = apierrors.NewNotFound(action.GetResource().GroupResource(), "configmap-name")
|
||||
}
|
||||
return true, tc.configMap, err
|
||||
})
|
||||
testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
|
||||
var err error
|
||||
if tc.secret == nil {
|
||||
err = apierrors.NewNotFound(action.GetResource().GroupResource(), "secret-name")
|
||||
}
|
||||
return true, tc.secret, err
|
||||
})
|
||||
|
||||
testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
|
||||
var err error
|
||||
if tc.secret == nil {
|
||||
err = errors.New("no secret defined")
|
||||
}
|
||||
return true, tc.secret, err
|
||||
})
|
||||
|
||||
testPod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: tc.ns,
|
||||
Name: "dapi-test-pod-name",
|
||||
},
|
||||
|
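The reactor change above swaps a plain errors.New value for a typed NotFound error. That matters because the production code checks errors.IsNotFound before honoring Optional; a short illustrative sketch of the distinction, using the apimachinery helpers (not the test code itself):

// Why the fake reactors return a typed NotFound error: a plain errors.New
// value would not satisfy apierrors.IsNotFound, so the Optional code path
// could never be exercised by the tests.
package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	typed := apierrors.NewNotFound(schema.GroupResource{Resource: "secrets"}, "missing-secret")
	plain := errors.New("no secret defined")

	fmt.Println(apierrors.IsNotFound(typed)) // true: an optional lookup can ignore it
	fmt.Println(apierrors.IsNotFound(plain)) // false: would surface as a hard error
}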
@ -1230,6 +1537,7 @@ func TestExec(t *testing.T) {
|
|||
|
||||
for _, tc := range testcases {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
|
@ -1320,6 +1628,7 @@ func TestPortForward(t *testing.T) {
|
|||
|
||||
for _, tc := range testcases {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
|
@ -1467,6 +1776,7 @@ func TestHasHostMountPVC(t *testing.T) {
|
|||
|
||||
for k, v := range tests {
|
||||
testKubelet := newTestKubelet(t, false)
|
||||
defer testKubelet.Cleanup()
|
||||
pod := &v1.Pod{
|
||||
Spec: v1.PodSpec{},
|
||||
}
|
||||
|
|
1
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources_test.go
generated
vendored
|
@ -33,6 +33,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
|
|||
cpuCores := resource.MustParse("10")
|
||||
memoryCapacity := resource.MustParse("10Gi")
|
||||
tk := newTestKubelet(t, true)
|
||||
defer tk.Cleanup()
|
||||
tk.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
tk.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{
|
||||
NumCores: int(cpuCores.Value()),
|
||||
|
|
102
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go
generated
vendored
|
@ -33,6 +33,8 @@ import (
|
|||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/clock"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
|
@ -54,13 +56,12 @@ import (
|
|||
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
|
||||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/queue"
|
||||
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
_ "k8s.io/kubernetes/pkg/volume/host_path"
|
||||
|
@ -106,6 +107,12 @@ type TestKubelet struct {
|
|||
volumePlugin *volumetest.FakeVolumePlugin
|
||||
}
|
||||
|
||||
func (tk *TestKubelet) Cleanup() {
|
||||
if tk.kubelet != nil {
|
||||
os.RemoveAll(tk.kubelet.rootDirectory)
|
||||
}
|
||||
}
|
||||
|
||||
// newTestKubelet returns test kubelet with two images.
|
||||
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
|
||||
imageList := []kubecontainer.Image{
|
||||
|
@ -152,7 +159,7 @@ func newTestKubeletWithImageList(
|
|||
t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
|
||||
}
|
||||
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
|
||||
kubelet.masterServiceNamespace = v1.NamespaceDefault
|
||||
kubelet.masterServiceNamespace = metav1.NamespaceDefault
|
||||
kubelet.serviceLister = testServiceLister{}
|
||||
kubelet.nodeLister = testNodeLister{}
|
||||
kubelet.nodeInfo = testNodeInfo{}
|
||||
|
@ -166,7 +173,12 @@ func newTestKubeletWithImageList(
|
|||
kubelet.cadvisor = mockCadvisor
|
||||
|
||||
fakeMirrorClient := podtest.NewFakeMirrorClient()
|
||||
kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
|
||||
secretManager, err := secret.NewSimpleSecretManager(kubelet.kubeClient)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create a secret manager: %v", err)
|
||||
}
|
||||
kubelet.secretManager = secretManager
|
||||
kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager)
|
||||
kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
|
||||
kubelet.containerRefManager = kubecontainer.NewRefManager()
|
||||
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
|
||||
|
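The hunk above wires a secret manager into the test kubelet (secret.NewSimpleSecretManager) and threads it through NewBasicPodManager and NewInitializedVolumePluginMgr. A rough sketch of the contract such a pass-through manager exposes, with assumed names and an in-memory store standing in for the API client:

// Sketch only: the interface and store below are assumptions for illustration,
// not the vendored secret package.
package main

import "fmt"

type secret struct {
	Namespace, Name string
	Data            map[string][]byte
}

// Manager is the minimal surface the kubelet code above relies on.
type Manager interface {
	GetSecret(namespace, name string) (*secret, error)
}

// simpleManager fetches on every call; a real manager would hit the API server,
// but the lookup contract is the same.
type simpleManager struct {
	store map[string]*secret // keyed by namespace/name, stand-in for a kube client
}

func (m *simpleManager) GetSecret(namespace, name string) (*secret, error) {
	key := namespace + "/" + name
	if s, ok := m.store[key]; ok {
		return s, nil
	}
	return nil, fmt.Errorf("secret %q not found", key)
}

func main() {
	var mgr Manager = &simpleManager{store: map[string]*secret{
		"test/pull-secret": {
			Namespace: "test",
			Name:      "pull-secret",
			Data:      map[string][]byte{".dockercfg": []byte("{}")},
		},
	}}
	s, err := mgr.GetSecret("test", "pull-secret")
	fmt.Println(s != nil, err)
}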
@ -241,7 +253,7 @@ func newTestKubeletWithImageList(
|
|||
|
||||
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
|
||||
kubelet.volumePluginMgr, err =
|
||||
NewInitializedVolumePluginMgr(kubelet, []volume.VolumePlugin{plug})
|
||||
NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, []volume.VolumePlugin{plug})
|
||||
require.NoError(t, err, "Failed to initialize VolumePluginMgr")
|
||||
|
||||
kubelet.mounter = &mount.FakeMounter{}
|
||||
|
@ -255,7 +267,8 @@ func newTestKubeletWithImageList(
|
|||
kubelet.mounter,
|
||||
kubelet.getPodsDir(),
|
||||
kubelet.recorder,
|
||||
false /* experimentalCheckNodeCapabilitiesBeforeMount*/)
|
||||
false, /* experimentalCheckNodeCapabilitiesBeforeMount*/
|
||||
false /* keepTerminatedPodVolumes */)
|
||||
require.NoError(t, err, "Failed to initialize volume manager")
|
||||
|
||||
// enable active deadline handler
|
||||
|
@ -275,7 +288,7 @@ func newTestPods(count int) []*v1.Pod {
|
|||
Spec: v1.PodSpec{
|
||||
HostNetwork: true,
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(10000 + i),
|
||||
Name: fmt.Sprintf("pod%d", i),
|
||||
},
|
||||
|
@ -288,6 +301,7 @@ var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
|
|||
|
||||
func TestSyncLoopTimeUpdate(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
|
@ -313,6 +327,7 @@ func TestSyncLoopTimeUpdate(t *testing.T) {
|
|||
|
||||
func TestSyncLoopAbort(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
kubelet := testKubelet.kubelet
|
||||
kubelet.runtimeState.setRuntimeSync(time.Now())
|
||||
|
@ -333,6 +348,7 @@ func TestSyncLoopAbort(t *testing.T) {
|
|||
|
||||
func TestSyncPodsStartPod(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -355,6 +371,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
|
|||
ready := false
|
||||
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -409,6 +426,7 @@ func (ls testNodeLister) List() (v1.NodeList, error) {
|
|||
// Tests that we handle port conflicts correctly by setting the failed status in status map.
|
||||
func TestHandlePortConflicts(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -416,7 +434,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
|||
|
||||
kl.nodeLister = testNodeLister{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -426,7 +444,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
|||
}}
|
||||
kl.nodeInfo = testNodeInfo{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -465,7 +483,7 @@ func TestCriticalPrioritySorting(t *testing.T) {
|
|||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
kl := testKubelet.kubelet
|
||||
nodes := []v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
|
||||
|
@ -526,6 +544,7 @@ func TestCriticalPrioritySorting(t *testing.T) {
|
|||
// Tests that we handle host name conflicts correctly by setting the failed status in status map.
|
||||
func TestHandleHostNameConflicts(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -533,7 +552,7 @@ func TestHandleHostNameConflicts(t *testing.T) {
|
|||
|
||||
kl.nodeLister = testNodeLister{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "127.0.0.1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -543,7 +562,7 @@ func TestHandleHostNameConflicts(t *testing.T) {
|
|||
}}
|
||||
kl.nodeInfo = testNodeInfo{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "127.0.0.1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -577,10 +596,11 @@ func TestHandleHostNameConflicts(t *testing.T) {
|
|||
// Tests that we handle not matching labels selector correctly by setting the failed status in status map.
|
||||
func TestHandleNodeSelector(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
nodes := []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -617,9 +637,10 @@ func TestHandleNodeSelector(t *testing.T) {
|
|||
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
|
||||
func TestHandleMemExceeded(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
nodes := []v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
|
||||
|
@ -666,6 +687,7 @@ func TestHandleMemExceeded(t *testing.T) {
|
|||
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
|
||||
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -678,8 +700,8 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
|
|||
|
||||
kl := testKubelet.kubelet
|
||||
pods := []*v1.Pod{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
|
||||
}
|
||||
podToTest := pods[1]
|
||||
// Run once to populate the status map.
|
||||
|
@ -697,6 +719,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
|
|||
|
||||
func TestValidateContainerLogStatus(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
containerName := "x"
|
||||
testCases := []struct {
|
||||
|
@ -823,6 +846,7 @@ func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, ro
|
|||
func TestCreateMirrorPod(t *testing.T) {
|
||||
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -849,6 +873,7 @@ func TestCreateMirrorPod(t *testing.T) {
|
|||
|
||||
func TestDeleteOutdatedMirrorPod(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -891,6 +916,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
|
|||
|
||||
func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -900,7 +926,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
|||
manager := testKubelet.fakeMirrorClient
|
||||
orphanPods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "pod1",
|
||||
Namespace: "ns",
|
||||
|
@ -911,7 +937,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345679",
|
||||
Name: "pod2",
|
||||
Namespace: "ns",
|
||||
|
@ -941,7 +967,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|||
// different UIDs.
|
||||
pods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "1234",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
|
@ -956,7 +982,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "5678",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
|
@ -982,6 +1008,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|||
}
|
||||
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
|
@ -1012,6 +1039,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|||
|
||||
func TestHostNetworkAllowed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1044,6 +1072,7 @@ func TestHostNetworkAllowed(t *testing.T) {
|
|||
|
||||
func TestHostNetworkDisallowed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1075,6 +1104,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
|
|||
|
||||
func TestPrivilegeContainerAllowed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1104,6 +1134,7 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
|
|||
|
||||
func TestPrivilegedContainerDisallowed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -1130,6 +1161,7 @@ func TestPrivilegedContainerDisallowed(t *testing.T) {
|
|||
|
||||
func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -1171,6 +1203,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
|
|||
|
||||
func TestFilterOutTerminatedPods(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
pods := newTestPods(5)
|
||||
pods[0].Status.Phase = v1.PodFailed
|
||||
|
@ -1237,6 +1270,7 @@ func TestMakePortMappings(t *testing.T) {
|
|||
|
||||
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
kubelet := testKubelet.kubelet
|
||||
|
@ -1247,7 +1281,7 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
|
|||
|
||||
pods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "bar",
|
||||
Namespace: "new",
|
||||
|
@ -1284,6 +1318,7 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
|
|||
|
||||
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
|
@ -1299,7 +1334,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
|
|||
|
||||
pods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "bar",
|
||||
Namespace: "new",
|
||||
|
@ -1336,7 +1371,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
|
|||
|
||||
func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uid,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
|
@ -1353,6 +1388,7 @@ func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
|
|||
|
||||
func TestDeletePodDirsForDeletedPods(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1392,6 +1428,7 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod,
|
|||
|
||||
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1414,6 +1451,7 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
|
|||
|
||||
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("Start").Return(nil)
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
|
@ -1446,6 +1484,7 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
|
|||
|
||||
func TestGetPodsToSync(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
clock := testKubelet.fakeClock
|
||||
pods := newTestPods(5)
|
||||
|
@ -1476,6 +1515,7 @@ func TestGetPodsToSync(t *testing.T) {
|
|||
|
||||
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -1538,6 +1578,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
|
|||
testErrorReason := fmt.Errorf("test-error")
|
||||
emptyContainerID := (&kubecontainer.ContainerID{}).String()
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -1727,6 +1768,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
|
|||
testErrorReason := fmt.Errorf("test-error")
|
||||
emptyContainerID := (&kubecontainer.ContainerID{}).String()
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
|
||||
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
|
||||
|
@ -1893,10 +1935,11 @@ func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecyc
|
|||
// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
|
||||
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
kl.nodeLister = testNodeLister{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -1906,7 +1949,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
|
|||
}}
|
||||
kl.nodeInfo = testNodeInfo{nodes: []v1.Node{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
|
||||
Status: v1.NodeStatus{
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
|
@ -1920,14 +1963,14 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
|
|||
|
||||
pods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "123456789",
|
||||
Name: "podA",
|
||||
Namespace: "foo",
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "987654321",
|
||||
Name: "podB",
|
||||
Namespace: "foo",
|
||||
|
@ -1972,6 +2015,7 @@ func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
|
|||
// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
|
||||
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
pods := newTestPods(5)
|
||||
expected := []*v1.Pod{pods[0]}
|
||||
|
@ -2007,6 +2051,7 @@ func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictRespo
|
|||
// TestGenerateAPIPodStatusInvokesPodSyncHandlers invokes the handlers and reports the proper status
|
||||
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
pod := newTestPods(1)[0]
|
||||
podsToEvict := []*v1.Pod{pod}
|
||||
|
@ -2024,9 +2069,10 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
|
|||
|
||||
func TestSyncPodKillPod(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kl := testKubelet.kubelet
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "bar",
|
||||
Namespace: "foo",
|
||||
|
|
16
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes_test.go
generated
vendored
|
@ -21,6 +21,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -85,11 +86,12 @@ func TestListVolumesForPod(t *testing.T) {
|
|||
|
||||
func TestPodVolumesExist(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
pods := []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
|
@ -107,7 +109,7 @@ func TestPodVolumesExist(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
UID: "pod2uid",
|
||||
},
|
||||
|
@ -125,7 +127,7 @@ func TestPodVolumesExist(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod3",
|
||||
UID: "pod3uid",
|
||||
},
|
||||
|
@ -169,6 +171,7 @@ func TestPodVolumesExist(t *testing.T) {
|
|||
|
||||
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
|
||||
|
@ -214,6 +217,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
|
|||
|
||||
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
|
||||
|
@ -284,12 +288,13 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
|
|||
|
||||
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.AddReactor("get", "nodes",
|
||||
func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Status: v1.NodeStatus{
|
||||
VolumesAttached: []v1.AttachedVolume{
|
||||
{
|
||||
|
@ -352,12 +357,13 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
|
|||
|
||||
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.AddReactor("get", "nodes",
|
||||
func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
|
||||
Status: v1.NodeStatus{
|
||||
VolumesAttached: []v1.AttachedVolume{
|
||||
{
|
||||
|
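
The paired ObjectMeta lines in the hunks above are the mechanical part of this bump: the embedded object metadata on API objects moved from pkg/api/v1 to the apimachinery metav1 package. A minimal sketch of the post-change form, assuming only the vendored paths visible in this diff (the function name is illustrative, not from the commit):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// examplePod builds a pod the way the updated tests do: the ObjectMeta
// literal now comes from metav1 rather than from the v1 API package.
func examplePod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			Namespace: "foo",
			UID:       "pod1uid",
		},
	}
}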

6 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD generated vendored

@@ -45,10 +45,11 @@ go_library(
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/parsers:go_default_library",
"//pkg/util/selinux:go_default_library",
"//pkg/util/tail:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor:github.com/armon/circbuf",
"//vendor:github.com/docker/docker/pkg/jsonlog",
"//vendor:github.com/fsnotify/fsnotify",
"//vendor:github.com/golang/glog",

@@ -58,6 +59,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

@@ -84,7 +86,6 @@ go_test(
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/testing:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/intstr:go_default_library",
"//vendor:github.com/golang/mock/gomock",
"//vendor:github.com/google/cadvisor/info/v1",

@@ -92,6 +93,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)
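
The go_library/go_test dependency swap above (//pkg/util/flowcontrol out, //vendor:k8s.io/client-go/util/flowcontrol in) corresponds to an import-path change in the Go sources of this package. A hedged sketch of the new import and a typical back-off construction; the durations are illustrative, and the constructor matches the flowcontrol.NewBackOff call that appears later in kuberuntime_manager_test.go:

package example

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

// newImagePullBackOff builds the kind of Backoff object the runtime manager
// keeps for image pulls, now taken from the vendored client-go package
// instead of k8s.io/kubernetes/pkg/util/flowcontrol.
func newImagePullBackOff() *flowcontrol.Backoff {
	return flowcontrol.NewBackOff(10*time.Second, 300*time.Second)
}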

4 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go generated vendored

@@ -24,6 +24,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/apimachinery/pkg/types"
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"

@@ -33,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/network"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/util/flowcontrol"
)

type fakeHTTP struct {

@@ -113,7 +113,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
}

kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
kubeRuntimeManager,

48 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go generated vendored

@@ -61,7 +61,7 @@ type podSandboxByCreated []*runtimeapi.PodSandbox

func (p podSandboxByCreated) Len() int { return len(p) }
func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podSandboxByCreated) Less(i, j int) bool { return p[i].GetCreatedAt() > p[j].GetCreatedAt() }
func (p podSandboxByCreated) Less(i, j int) bool { return p[i].CreatedAt > p[j].CreatedAt }

type containerStatusByCreated []*kubecontainer.ContainerStatus

@@ -100,18 +100,18 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {

// toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == nil || c.Image == nil || c.State == nil {
if c == nil || c.Id == "" || c.Image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}

labeledInfo := getContainerInfoFromLabels(c.Labels)
annotatedInfo := getContainerInfoFromAnnotations(c.Annotations)
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.GetId()},
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.Id},
Name: labeledInfo.ContainerName,
Image: c.Image.GetImage(),
Image: c.Image.Image,
Hash: annotatedInfo.Hash,
State: toKubeContainerState(c.GetState()),
State: toKubeContainerState(c.State),
}, nil
}

@@ -120,34 +120,36 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*k
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
if s == nil || s.Id == nil || s.State == nil {
if s == nil || s.Id == "" {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}

return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.GetId()},
State: kubecontainer.SandboxToContainerState(s.GetState()),
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.Id},
State: kubecontainer.SandboxToContainerState(s.State),
}, nil
}

// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image})
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
if err != nil {
return nil, nil, err
return nil, "", err
}

if imageStatus != nil && imageStatus.Uid != nil {
// If uid is set, return uid.
return imageStatus.Uid, nil, nil
}
if imageStatus != nil && imageStatus.Username != nil {
// If uid is not set, but user name is set, return user name.
return nil, imageStatus.Username, nil
if imageStatus != nil {
if imageStatus.Uid != nil {
return &imageStatus.GetUid().Value, "", nil
}

if imageStatus.Username != "" {
return nil, imageStatus.Username, nil
}
}

// If non of them is set, treat it as root.
return new(int64), nil, nil
return new(int64), "", nil
}

// isContainerFailed returns true if container has exited and exitcode is not zero.

@@ -226,10 +228,10 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim
conditions := []kubecontainer.RuntimeCondition{}
for _, c := range status.GetConditions() {
conditions = append(conditions, kubecontainer.RuntimeCondition{
Type: kubecontainer.RuntimeConditionType(c.GetType()),
Status: c.GetStatus(),
Reason: c.GetReason(),
Message: c.GetMessage(),
Type: kubecontainer.RuntimeConditionType(c.Type),
Status: c.Status,
Reason: c.Reason,
Message: c.Message,
})
}
return &kubecontainer.RuntimeStatus{Conditions: conditions}
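
The helpers.go hunks above show the wider CRI v1alpha1 change behind most of this diff: generated message fields such as Id, Image, State and CreatedAt become plain values instead of pointers, so call sites drop the nil-safe Get*() accessors, and enum-typed filters are wrapped in *StateValue messages. A sketch of the resulting calling convention; the helper itself is not part of the commit, and it assumes the RuntimeService.ListContainers signature used elsewhere in this package:

package kuberuntime

import (
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// listRunningContainerIDs lists running containers through the CRI runtime
// service and reads the scalar Id field directly, as the updated helpers do.
func (m *kubeGenericRuntimeManager) listRunningContainerIDs() ([]string, error) {
	filter := &runtimeapi.ContainerFilter{
		State: &runtimeapi.ContainerStateValue{
			State: runtimeapi.ContainerState_CONTAINER_RUNNING,
		},
	}
	containers, err := m.runtimeService.ListContainers(filter)
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(containers))
	for _, c := range containers {
		ids = append(ids, c.Id) // was c.GetId() before this update
	}
	return ids, nil
}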

3 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers_test.go generated vendored

@@ -20,6 +20,7 @@ import (
"testing"

"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
)

@@ -29,7 +30,7 @@ func TestStableKey(t *testing.T) {
Image: "foo/image:v1",
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
184
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
generated
vendored
184
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
generated
vendored
|
@ -19,7 +19,6 @@ package kuberuntime
|
|||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -28,7 +27,9 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armon/circbuf"
|
||||
"github.com/golang/glog"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
@ -41,6 +42,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/util/selinux"
|
||||
"k8s.io/kubernetes/pkg/util/tail"
|
||||
)
|
||||
|
||||
// startContainer starts a container and returns a message indicates why it is failed on error.
|
||||
|
@ -102,9 +104,9 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
|
|||
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
|
||||
containerMeta := containerConfig.GetMetadata()
|
||||
sandboxMeta := podSandboxConfig.GetMetadata()
|
||||
legacySymlink := legacyLogSymlink(containerID, containerMeta.GetName(), sandboxMeta.GetName(),
|
||||
sandboxMeta.GetNamespace())
|
||||
containerLog := filepath.Join(podSandboxConfig.GetLogDirectory(), containerConfig.GetLogPath())
|
||||
legacySymlink := legacyLogSymlink(containerID, containerMeta.Name, sandboxMeta.Name,
|
||||
sandboxMeta.Namespace)
|
||||
containerLog := filepath.Join(podSandboxConfig.LogDirectory, containerConfig.LogPath)
|
||||
if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
|
||||
glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
|
||||
legacySymlink, containerID, containerLog, err)
|
||||
|
@ -144,8 +146,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
|
|||
if err := verifyRunAsNonRoot(pod, container, *uid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", *username)
|
||||
} else if username != "" {
|
||||
glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", username)
|
||||
}
|
||||
|
||||
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
|
||||
|
@ -153,21 +155,21 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
|
|||
restartCountUint32 := uint32(restartCount)
|
||||
config := &runtimeapi.ContainerConfig{
|
||||
Metadata: &runtimeapi.ContainerMetadata{
|
||||
Name: &container.Name,
|
||||
Attempt: &restartCountUint32,
|
||||
Name: container.Name,
|
||||
Attempt: restartCountUint32,
|
||||
},
|
||||
Image: &runtimeapi.ImageSpec{Image: &imageRef},
|
||||
Image: &runtimeapi.ImageSpec{Image: imageRef},
|
||||
Command: command,
|
||||
Args: args,
|
||||
WorkingDir: &container.WorkingDir,
|
||||
WorkingDir: container.WorkingDir,
|
||||
Labels: newContainerLabels(container, pod),
|
||||
Annotations: newContainerAnnotations(container, pod, restartCount),
|
||||
Devices: makeDevices(opts),
|
||||
Mounts: m.makeMounts(opts, container),
|
||||
LogPath: &containerLogsPath,
|
||||
Stdin: &container.Stdin,
|
||||
StdinOnce: &container.StdinOnce,
|
||||
Tty: &container.TTY,
|
||||
LogPath: containerLogsPath,
|
||||
Stdin: container.Stdin,
|
||||
StdinOnce: container.StdinOnce,
|
||||
Tty: container.TTY,
|
||||
Linux: m.generateLinuxContainerConfig(container, pod, uid, username),
|
||||
}
|
||||
|
||||
|
@ -176,8 +178,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
|
|||
for idx := range opts.Envs {
|
||||
e := opts.Envs[idx]
|
||||
envs[idx] = &runtimeapi.KeyValue{
|
||||
Key: &e.Name,
|
||||
Value: &e.Value,
|
||||
Key: e.Name,
|
||||
Value: e.Value,
|
||||
}
|
||||
}
|
||||
config.Envs = envs
|
||||
|
@ -186,7 +188,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
|
|||
}
|
||||
|
||||
// generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
|
||||
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeapi.LinuxContainerConfig {
|
||||
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) *runtimeapi.LinuxContainerConfig {
|
||||
lc := &runtimeapi.LinuxContainerConfig{
|
||||
Resources: &runtimeapi.LinuxContainerResources{},
|
||||
SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
|
||||
|
@ -209,20 +211,20 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
|
|||
// of CPU shares.
|
||||
cpuShares = milliCPUToShares(cpuRequest.MilliValue())
|
||||
}
|
||||
lc.Resources.CpuShares = &cpuShares
|
||||
lc.Resources.CpuShares = cpuShares
|
||||
if memoryLimit != 0 {
|
||||
lc.Resources.MemoryLimitInBytes = &memoryLimit
|
||||
lc.Resources.MemoryLimitInBytes = memoryLimit
|
||||
}
|
||||
// Set OOM score of the container based on qos policy. Processes in lower-priority pods should
|
||||
// be killed first if the system runs out of memory.
|
||||
lc.Resources.OomScoreAdj = &oomScoreAdj
|
||||
lc.Resources.OomScoreAdj = oomScoreAdj
|
||||
|
||||
if m.cpuCFSQuota {
|
||||
// if cpuLimit.Amount is nil, then the appropriate default value is returned
|
||||
// to allow full usage of cpu resource.
|
||||
cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
|
||||
lc.Resources.CpuQuota = &cpuQuota
|
||||
lc.Resources.CpuPeriod = &cpuPeriod
|
||||
lc.Resources.CpuQuota = cpuQuota
|
||||
lc.Resources.CpuPeriod = cpuPeriod
|
||||
}
|
||||
|
||||
return lc
|
||||
|
@ -235,9 +237,9 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device {
|
|||
for idx := range opts.Devices {
|
||||
device := opts.Devices[idx]
|
||||
devices[idx] = &runtimeapi.Device{
|
||||
HostPath: &device.PathOnHost,
|
||||
ContainerPath: &device.PathInContainer,
|
||||
Permissions: &device.Permissions,
|
||||
HostPath: device.PathOnHost,
|
||||
ContainerPath: device.PathInContainer,
|
||||
Permissions: device.Permissions,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -252,10 +254,10 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
|
|||
v := opts.Mounts[idx]
|
||||
selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled()
|
||||
mount := &runtimeapi.Mount{
|
||||
HostPath: &v.HostPath,
|
||||
ContainerPath: &v.ContainerPath,
|
||||
Readonly: &v.ReadOnly,
|
||||
SelinuxRelabel: &selinuxRelabel,
|
||||
HostPath: v.HostPath,
|
||||
ContainerPath: v.ContainerPath,
|
||||
Readonly: v.ReadOnly,
|
||||
SelinuxRelabel: selinuxRelabel,
|
||||
}
|
||||
|
||||
volumeMounts = append(volumeMounts, mount)
|
||||
|
@ -272,14 +274,23 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
|
|||
containerLogPath := filepath.Join(opts.PodContainerDir, cid)
|
||||
fs, err := m.osInterface.Create(containerLogPath)
|
||||
if err != nil {
|
||||
glog.Errorf("Error on creating termination-log file %q: %v", containerLogPath, err)
|
||||
utilruntime.HandleError(fmt.Errorf("error on creating termination-log file %q: %v", containerLogPath, err))
|
||||
} else {
|
||||
fs.Close()
|
||||
|
||||
// Chmod is needed because ioutil.WriteFile() ends up calling
|
||||
// open(2) to create the file, so the final mode used is "mode &
|
||||
// ~umask". But we want to make sure the specified mode is used
|
||||
// in the file no matter what the umask is.
|
||||
if err := m.osInterface.Chmod(containerLogPath, 0666); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to set termination-log file permissions %q: %v", containerLogPath, err))
|
||||
}
|
||||
|
||||
selinuxRelabel := selinux.SELinuxEnabled()
|
||||
volumeMounts = append(volumeMounts, &runtimeapi.Mount{
|
||||
HostPath: &containerLogPath,
|
||||
ContainerPath: &container.TerminationMessagePath,
|
||||
SelinuxRelabel: &selinuxRelabel,
|
||||
HostPath: containerLogPath,
|
||||
ContainerPath: container.TerminationMessagePath,
|
||||
SelinuxRelabel: selinuxRelabel,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -296,7 +307,9 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
|
|||
}
|
||||
if !allContainers {
|
||||
runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
filter.State = &runningState
|
||||
filter.State = &runtimeapi.ContainerStateValue{
|
||||
State: runningState,
|
||||
}
|
||||
}
|
||||
|
||||
containers, err := m.getContainersHelper(filter)
|
||||
|
@ -323,29 +336,36 @@ func makeUID() string {
|
|||
return fmt.Sprintf("%08x", rand.Uint32())
|
||||
}
|
||||
|
||||
// getTerminationMessage gets termination message of the container.
|
||||
func getTerminationMessage(status *runtimeapi.ContainerStatus, kubeStatus *kubecontainer.ContainerStatus, terminationMessagePath string) string {
|
||||
message := ""
|
||||
|
||||
if !kubeStatus.FinishedAt.IsZero() || kubeStatus.ExitCode != 0 {
|
||||
if terminationMessagePath == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// getTerminationMessage looks on the filesystem for the provided termination message path, returning a limited
|
||||
// amount of those bytes, or returns true if the logs should be checked.
|
||||
func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessagePath string, fallbackToLogs bool) (string, bool) {
|
||||
if len(terminationMessagePath) != 0 {
|
||||
for _, mount := range status.Mounts {
|
||||
if mount.GetContainerPath() == terminationMessagePath {
|
||||
path := mount.GetHostPath()
|
||||
if data, err := ioutil.ReadFile(path); err != nil {
|
||||
message = fmt.Sprintf("Error on reading termination-log %s: %v", path, err)
|
||||
} else {
|
||||
message = string(data)
|
||||
}
|
||||
break
|
||||
if mount.ContainerPath != terminationMessagePath {
|
||||
continue
|
||||
}
|
||||
path := mount.HostPath
|
||||
data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error on reading termination log %s: %v", path, err), false
|
||||
}
|
||||
if !fallbackToLogs || len(data) != 0 {
|
||||
return string(data), false
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fallbackToLogs
|
||||
}
|
||||
|
||||
return message
|
||||
// readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented
|
||||
// by path. It reads up to max log lines.
|
||||
func readLastStringFromContainerLogs(path string) string {
|
||||
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
|
||||
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
|
||||
if err := ReadLogs(path, &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
|
||||
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// getPodContainerStatuses gets all containers' statuses for the pod.
|
||||
|
@ -362,9 +382,9 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
|
|||
statuses := make([]*kubecontainer.ContainerStatus, len(containers))
|
||||
// TODO: optimization: set maximum number of containers per container name to examine.
|
||||
for i, c := range containers {
|
||||
status, err := m.runtimeService.ContainerStatus(c.GetId())
|
||||
status, err := m.runtimeService.ContainerStatus(c.Id)
|
||||
if err != nil {
|
||||
glog.Errorf("ContainerStatus for %s error: %v", c.GetId(), err)
|
||||
glog.Errorf("ContainerStatus for %s error: %v", c.Id, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -373,31 +393,37 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
|
|||
cStatus := &kubecontainer.ContainerStatus{
|
||||
ID: kubecontainer.ContainerID{
|
||||
Type: m.runtimeName,
|
||||
ID: c.GetId(),
|
||||
ID: c.Id,
|
||||
},
|
||||
Name: labeledInfo.ContainerName,
|
||||
Image: status.Image.GetImage(),
|
||||
ImageID: status.GetImageRef(),
|
||||
Image: status.Image.Image,
|
||||
ImageID: status.ImageRef,
|
||||
Hash: annotatedInfo.Hash,
|
||||
RestartCount: annotatedInfo.RestartCount,
|
||||
State: toKubeContainerState(c.GetState()),
|
||||
CreatedAt: time.Unix(0, status.GetCreatedAt()),
|
||||
State: toKubeContainerState(c.State),
|
||||
CreatedAt: time.Unix(0, status.CreatedAt),
|
||||
}
|
||||
|
||||
if c.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
|
||||
cStatus.StartedAt = time.Unix(0, status.GetStartedAt())
|
||||
if c.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
|
||||
cStatus.StartedAt = time.Unix(0, status.StartedAt)
|
||||
} else {
|
||||
cStatus.Reason = status.GetReason()
|
||||
cStatus.Message = status.GetMessage()
|
||||
cStatus.ExitCode = int(status.GetExitCode())
|
||||
cStatus.FinishedAt = time.Unix(0, status.GetFinishedAt())
|
||||
cStatus.Reason = status.Reason
|
||||
cStatus.Message = status.Message
|
||||
cStatus.ExitCode = int(status.ExitCode)
|
||||
cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
|
||||
|
||||
fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && (cStatus.ExitCode != 0 || cStatus.Reason == "OOMKilled")
|
||||
tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
|
||||
if checkLogs {
|
||||
path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount)
|
||||
tMessage = readLastStringFromContainerLogs(path)
|
||||
}
|
||||
// Use the termination message written by the application is not empty
|
||||
if len(tMessage) != 0 {
|
||||
cStatus.Message = tMessage
|
||||
}
|
||||
}
|
||||
|
||||
tMessage := getTerminationMessage(status, cStatus, annotatedInfo.TerminationMessagePath)
|
||||
// Use the termination message written by the application is not empty
|
||||
if len(tMessage) != 0 {
|
||||
cStatus.Message = tMessage
|
||||
}
|
||||
statuses[i] = cStatus
|
||||
}
|
||||
|
||||
|
@ -461,7 +487,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
|
|||
// Notice that the followings are not full spec. The container killing code should not use
|
||||
// un-restored fields.
|
||||
pod = &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: l.PodUID,
|
||||
Name: l.PodName,
|
||||
Namespace: l.PodNamespace,
|
||||
|
@ -670,31 +696,31 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
|
|||
// GetExec gets the endpoint the runtime will serve the exec request from.
|
||||
func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
|
||||
req := &runtimeapi.ExecRequest{
|
||||
ContainerId: &id.ID,
|
||||
ContainerId: id.ID,
|
||||
Cmd: cmd,
|
||||
Tty: &tty,
|
||||
Stdin: &stdin,
|
||||
Tty: tty,
|
||||
Stdin: stdin,
|
||||
}
|
||||
resp, err := m.runtimeService.Exec(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return url.Parse(resp.GetUrl())
|
||||
return url.Parse(resp.Url)
|
||||
}
|
||||
|
||||
// GetAttach gets the endpoint the runtime will serve the attach request from.
|
||||
func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
|
||||
req := &runtimeapi.AttachRequest{
|
||||
ContainerId: &id.ID,
|
||||
Stdin: &stdin,
|
||||
Tty: &tty,
|
||||
ContainerId: id.ID,
|
||||
Stdin: stdin,
|
||||
Tty: tty,
|
||||
}
|
||||
resp, err := m.runtimeService.Attach(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return url.Parse(resp.GetUrl())
|
||||
return url.Parse(resp.Url)
|
||||
}
|
||||
|
||||
// RunInContainer synchronously executes the command in the container, and returns the output.
|
||||
|
|

7 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_test.go generated vendored

@@ -21,6 +21,7 @@ import (
"testing"

"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"

@@ -30,7 +31,7 @@ import (
func TestRemoveContainer(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",

@@ -50,7 +51,7 @@ func TestRemoveContainer(t *testing.T) {
_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Equal(t, len(fakeContainers), 1)

containerId := fakeContainers[0].GetId()
containerId := fakeContainers[0].Id
fakeOS := m.osInterface.(*containertest.FakeOS)
err = m.removeContainer(containerId)
assert.NoError(t, err)

@@ -60,7 +61,7 @@ func TestRemoveContainer(t *testing.T) {
assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
// Verify container is removed
fakeRuntime.AssertCalls([]string{"RemoveContainer"})
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: &containerId})
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerId})
assert.NoError(t, err)
assert.Empty(t, containers)
}

18 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go generated vendored

@@ -161,21 +161,21 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
newestGCTime := time.Now().Add(-minAge)
for _, container := range containers {
// Prune out running containers.
if container.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
if container.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
continue
}

createdAt := time.Unix(0, container.GetCreatedAt())
createdAt := time.Unix(0, container.CreatedAt)
if newestGCTime.Before(createdAt) {
continue
}

labeledInfo := getContainerInfoFromLabels(container.Labels)
containerInfo := containerGCInfo{
id: container.GetId(),
name: container.Metadata.GetName(),
id: container.Id,
name: container.Metadata.Name,
createTime: createdAt,
sandboxID: container.GetPodSandboxId(),
sandboxID: container.PodSandboxId,
}
key := evictUnit{
uid: labeledInfo.PodUID,

@@ -256,15 +256,15 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
newestGCTime := time.Now().Add(-minAge)
for _, sandbox := range sandboxes {
// Prune out ready sandboxes.
if sandbox.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}

// Prune out sandboxes that still have containers.
found := false
sandboxID := sandbox.GetId()
sandboxID := sandbox.Id
for _, container := range containers {
if container.GetPodSandboxId() == sandboxID {
if container.PodSandboxId == sandboxID {
found = true
break
}

@@ -274,7 +274,7 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
}

// Only garbage collect sandboxes older than sandboxMinGCAge.
createdAt := time.Unix(0, sandbox.GetCreatedAt())
createdAt := time.Unix(0, sandbox.CreatedAt)
if createdAt.After(newestGCTime) {
continue
}

4 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go generated vendored

@@ -115,7 +115,7 @@ func TestSandboxGC(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].GetId())
status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id)
assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, status)
}

@@ -288,7 +288,7 @@ func TestContainerGC(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].GetId())
status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id)
assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, status)
}

26 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go generated vendored

@@ -40,7 +40,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
return "", err
}

imgSpec := &runtimeapi.ImageSpec{Image: &img}
imgSpec := &runtimeapi.ImageSpec{Image: img}
creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials {
glog.V(3).Infof("Pulling image %q without credentials", img)

@@ -58,12 +58,12 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
for _, currentCreds := range creds {
authConfig := credentialprovider.LazyProvide(currentCreds)
auth := &runtimeapi.AuthConfig{
Username: &authConfig.Username,
Password: &authConfig.Password,
Auth: &authConfig.Auth,
ServerAddress: &authConfig.ServerAddress,
IdentityToken: &authConfig.IdentityToken,
RegistryToken: &authConfig.RegistryToken,
Username: authConfig.Username,
Password: authConfig.Password,
Auth: authConfig.Auth,
ServerAddress: authConfig.ServerAddress,
IdentityToken: authConfig.IdentityToken,
RegistryToken: authConfig.RegistryToken,
}

imageRef, err := m.imageService.PullImage(imgSpec, auth)

@@ -81,7 +81,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
// GetImageRef gets the reference (digest or ID) of the image which has already been in
// the local storage. It returns ("", nil) if the image isn't in the local storage.
func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image.Image})
status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image})
if err != nil {
glog.Errorf("ImageStatus for image %q failed: %v", image, err)
return "", err

@@ -90,7 +90,7 @@ func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (
return "", nil
}

imageRef := status.GetId()
imageRef := status.Id
if len(status.RepoDigests) > 0 {
imageRef = status.RepoDigests[0]
}

@@ -109,8 +109,8 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)

for _, img := range allImages {
images = append(images, kubecontainer.Image{
ID: img.GetId(),
Size: int64(img.GetSize_()),
ID: img.Id,
Size: int64(img.Size_),
RepoTags: img.RepoTags,
RepoDigests: img.RepoDigests,
})

@@ -121,7 +121,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)

// RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: &image.Image})
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image})
if err != nil {
glog.Errorf("Remove image %q failed: %v", image.Image, err)
return err

@@ -142,7 +142,7 @@ func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, err
}
stats := &kubecontainer.ImageStats{}
for _, img := range allImages {
stats.TotalStorageBytes += img.GetSize_()
stats.TotalStorageBytes += img.Size_
}
return stats, nil
}
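
The PullImage hunks above switch ImageSpec and AuthConfig to plain string fields. A short sketch of the post-change call shape; the helper name and the basic-auth-only credentials are illustrative, while the ImageSpec/AuthConfig literals and the m.imageService.PullImage call mirror the diff:

package kuberuntime

import (
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// pullWithBasicAuth pulls an image with username/password credentials using
// the value-typed CRI messages introduced by this update.
func (m *kubeGenericRuntimeManager) pullWithBasicAuth(img, user, pass string) (string, error) {
	spec := &runtimeapi.ImageSpec{Image: img}
	auth := &runtimeapi.AuthConfig{
		Username: user,
		Password: pass,
	}
	return m.imageService.PullImage(spec, auth)
}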

40 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go generated vendored

@@ -32,6 +32,7 @@ import (
"github.com/golang/glog"

"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/tail"
)

// Notice that the current kuberuntime logs implementation doesn't handle

@@ -120,7 +121,7 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer)
opts := newLogOptions(apiOpts, time.Now())

// Search start point based on tail line.
start, err := tail(f, opts.tail)
start, err := tail.FindTailLineStartIndex(f, opts.tail)
if err != nil {
return fmt.Errorf("failed to tail %d lines of log file %q: %v", opts.tail, path, err)
}

@@ -347,40 +348,3 @@ func (w *logWriter) write(msg *logMessage) error {
}
return nil
}

// tail returns the start of last nth line.
// * If n < 0, return the beginning of the file.
// * If n >= 0, return the beginning of last nth line.
// Notice that if the last line is incomplete (no end-of-line), it will not be counted
// as one line.
func tail(f io.ReadSeeker, n int64) (int64, error) {
if n < 0 {
return 0, nil
}
size, err := f.Seek(0, os.SEEK_END)
if err != nil {
return 0, err
}
var left, cnt int64
buf := make([]byte, blockSize)
for right := size; right > 0 && cnt <= n; right -= blockSize {
left = right - blockSize
if left < 0 {
left = 0
buf = make([]byte, right)
}
if _, err := f.Seek(left, os.SEEK_SET); err != nil {
return 0, err
}
if _, err := f.Read(buf); err != nil {
return 0, err
}
cnt += int64(bytes.Count(buf, eol))
}
for ; cnt > n; cnt-- {
idx := bytes.Index(buf, eol) + 1
buf = buf[idx:]
left += int64(idx)
}
return left, nil
}
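
The last hunk above deletes the package-local tail helper; ReadLogs now calls tail.FindTailLineStartIndex from pkg/util/tail, which this diff also adds to the BUILD deps. A usage sketch, assuming the replacement keeps the (io.ReadSeeker, int64) shape of the function it replaces; the log path is hypothetical:

package main

import (
	"fmt"
	"os"

	"k8s.io/kubernetes/pkg/util/tail"
)

func main() {
	// Hypothetical CRI container log file.
	f, err := os.Open("/var/log/pods/example_0.log")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Byte offset at which the last 10 complete lines of the file begin.
	start, err := tail.FindTailLineStartIndex(f, 10)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("tail of log starts at offset %d\n", start)
}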

26 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs_test.go generated vendored

@@ -18,7 +18,6 @@ package kuberuntime

import (
"bytes"
"strings"
"testing"
"time"

@@ -242,28 +241,3 @@ func TestWriteLogsWithBytesLimit(t *testing.T) {
assert.Equal(t, test.expectStderr, stderrBuf.String())
}
}

func TestTail(t *testing.T) {
line := strings.Repeat("a", blockSize)
testBytes := []byte(line + "\n" +
line + "\n" +
line + "\n" +
line + "\n" +
line[blockSize/2:]) // incomplete line

for c, test := range []struct {
n int64
start int64
}{
{n: -1, start: 0},
{n: 0, start: int64(len(line)+1) * 4},
{n: 1, start: int64(len(line)+1) * 3},
{n: 9999, start: 0},
} {
t.Logf("TestCase #%d: %+v", c, test)
r := bytes.NewReader(testBytes)
s, err := tail(r, test.n)
assert.NoError(t, err)
assert.Equal(t, s, test.start)
}
}
49
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go
generated
vendored
49
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go
generated
vendored
|
@ -25,8 +25,10 @@ import (
|
|||
"github.com/golang/glog"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/credentialprovider"
|
||||
|
@ -41,7 +43,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/cache"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
)
|
||||
|
||||
|
@ -155,18 +156,18 @@ func NewKubeGenericRuntimeManager(
|
|||
|
||||
// Only matching kubeRuntimeAPIVersion is supported now
|
||||
// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
|
||||
if typedVersion.GetVersion() != kubeRuntimeAPIVersion {
|
||||
if typedVersion.Version != kubeRuntimeAPIVersion {
|
||||
glog.Errorf("Runtime api version %s is not supported, only %s is supported now",
|
||||
typedVersion.GetVersion(),
|
||||
typedVersion.Version,
|
||||
kubeRuntimeAPIVersion)
|
||||
return nil, ErrVersionNotSupported
|
||||
}
|
||||
|
||||
kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
|
||||
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
|
||||
glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
|
||||
typedVersion.GetRuntimeName(),
|
||||
typedVersion.GetRuntimeVersion(),
|
||||
typedVersion.GetRuntimeApiVersion())
|
||||
typedVersion.RuntimeName,
|
||||
typedVersion.RuntimeVersion,
|
||||
typedVersion.RuntimeApiVersion)
|
||||
|
||||
// If the container logs directory does not exist, create it.
|
||||
// TODO: create podLogsRootDirectory at kubelet.go when kubelet is refactored to
|
||||
|
@ -223,7 +224,7 @@ func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return newRuntimeVersion(typedVersion.GetVersion())
|
||||
return newRuntimeVersion(typedVersion.Version)
|
||||
}
|
||||
|
||||
// APIVersion returns the cached API version information of the container
|
||||
|
@ -236,7 +237,7 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
|
|||
}
|
||||
typedVersion := versionObject.(*runtimeapi.VersionResponse)
|
||||
|
||||
return newRuntimeVersion(typedVersion.GetRuntimeApiVersion())
|
||||
return newRuntimeVersion(typedVersion.RuntimeApiVersion)
|
||||
}
|
||||
|
||||
// Status returns the status of the runtime. An error is returned if the Status
|
||||
|
@ -264,12 +265,12 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
|
|||
glog.V(4).Infof("Sandbox does not have metadata: %+v", s)
|
||||
continue
|
||||
}
|
||||
podUID := kubetypes.UID(s.Metadata.GetUid())
|
||||
podUID := kubetypes.UID(s.Metadata.Uid)
|
||||
if _, ok := pods[podUID]; !ok {
|
||||
pods[podUID] = &kubecontainer.Pod{
|
||||
ID: podUID,
|
||||
Name: s.Metadata.GetName(),
|
||||
Namespace: s.Metadata.GetNamespace(),
|
||||
Name: s.Metadata.Name,
|
||||
Namespace: s.Metadata.Namespace,
|
||||
}
|
||||
}
|
||||
p := pods[podUID]
|
||||
|
@ -371,26 +372,26 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
|
|||
|
||||
readySandboxCount := 0
|
||||
for _, s := range podStatus.SandboxStatuses {
|
||||
if s.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
readySandboxCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
|
||||
sandboxStatus := podStatus.SandboxStatuses[0]
|
||||
if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
if readySandboxCount > 1 || sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
|
||||
return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId()
|
||||
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
|
||||
}
|
||||
|
||||
// Needs to create a new sandbox when network namespace changed.
|
||||
if sandboxStatus.Linux != nil && sandboxStatus.Linux.Namespaces.Options != nil &&
|
||||
sandboxStatus.Linux.Namespaces.Options.GetHostNetwork() != kubecontainer.IsHostNetworkPod(pod) {
|
||||
sandboxStatus.Linux.Namespaces.Options.HostNetwork != kubecontainer.IsHostNetworkPod(pod) {
|
||||
glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
|
||||
return true, sandboxStatus.Metadata.GetAttempt() + 1, ""
|
||||
return true, sandboxStatus.Metadata.Attempt + 1, ""
|
||||
}
|
||||
|
||||
return false, sandboxStatus.Metadata.GetAttempt(), sandboxStatus.GetId()
|
||||
return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
|
||||
}
|
||||
|
||||
// checkAndKeepInitContainers keeps all successfully completed init containers. If there
|
||||
|
@ -793,10 +794,8 @@ func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.P
|
|||
return false, err
|
||||
}
|
||||
|
||||
if podStatus.Linux != nil && podStatus.Linux.Namespaces != nil && podStatus.Linux.Namespaces.Options != nil {
|
||||
if podStatus.Linux.Namespaces.Options.HostNetwork != nil {
|
||||
return podStatus.Linux.Namespaces.Options.GetHostNetwork(), nil
|
||||
}
|
||||
if nsOpts := podStatus.GetLinux().GetNamespaces().GetOptions(); nsOpts != nil {
|
||||
return nsOpts.HostNetwork, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
|
@ -824,7 +823,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
|
|||
}
|
||||
|
||||
podFullName := format.Pod(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
UID: uid,
|
||||
|
@ -843,7 +842,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
|
|||
sandboxStatuses[idx] = podSandboxStatus
|
||||
|
||||
// Only get pod IP from latest sandbox
|
||||
if idx == 0 && podSandboxStatus.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
|
||||
}
|
||||
}
|
||||
|
@ -899,7 +898,7 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
|
|||
return m.runtimeService.UpdateRuntimeConfig(
|
||||
&runtimeapi.RuntimeConfig{
|
||||
NetworkConfig: &runtimeapi.NetworkConfig{
|
||||
PodCidr: &podCIDR,
|
||||
PodCidr: podCIDR,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
|
65
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
generated
vendored
65
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
generated
vendored
|
@ -24,8 +24,10 @@ import (
|
|||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
apitest "k8s.io/kubernetes/pkg/kubelet/api/testing"
|
||||
|
@ -34,7 +36,6 @@ import (
|
|||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -118,12 +119,12 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san
|
|||
podSandboxID := apitest.BuildSandboxName(config.Metadata)
|
||||
return &apitest.FakePodSandbox{
|
||||
PodSandboxStatus: runtimeapi.PodSandboxStatus{
|
||||
Id: &podSandboxID,
|
||||
Id: podSandboxID,
|
||||
Metadata: config.Metadata,
|
||||
State: &template.state,
|
||||
CreatedAt: &template.createdAt,
|
||||
State: template.state,
|
||||
CreatedAt: template.createdAt,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{
|
||||
Ip: &apitest.FakePodSandboxIP,
|
||||
Ip: apitest.FakePodSandboxIP,
|
||||
},
|
||||
Labels: config.Labels,
|
||||
},
|
||||
|
@ -150,15 +151,15 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
|
|||
|
||||
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
|
||||
containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
|
||||
imageRef := containerConfig.Image.GetImage()
|
||||
imageRef := containerConfig.Image.Image
|
||||
return &apitest.FakeContainer{
|
||||
ContainerStatus: runtimeapi.ContainerStatus{
|
||||
Id: &containerID,
|
||||
Id: containerID,
|
||||
Metadata: containerConfig.Metadata,
|
||||
Image: containerConfig.Image,
|
||||
ImageRef: &imageRef,
|
||||
CreatedAt: &template.createdAt,
|
||||
State: &template.state,
|
||||
ImageRef: imageRef,
|
||||
CreatedAt: template.createdAt,
|
||||
State: template.state,
|
||||
Labels: containerConfig.Labels,
|
||||
Annotations: containerConfig.Annotations,
|
||||
},
|
||||
|
@ -187,7 +188,7 @@ func makeTestContainer(name, image string) v1.Container {
|
|||
// makeTestPod creates a test api pod.
|
||||
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: types.UID(podUID),
|
||||
Name: podName,
|
||||
Namespace: podNamespace,
|
||||
|
@ -222,7 +223,7 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
|
|||
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected []string) ([]string, bool) {
|
||||
actual := []string{}
|
||||
for _, c := range fakeRuntime.Containers {
|
||||
actual = append(actual, c.GetId())
|
||||
actual = append(actual, c.Id)
|
||||
}
|
||||
sort.Sort(sort.StringSlice(actual))
|
||||
sort.Sort(sort.StringSlice(expected))
|
||||
|
@ -269,7 +270,7 @@ func TestGetPodStatus(t *testing.T) {
|
|||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -295,7 +296,7 @@ func TestGetPods(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -371,7 +372,7 @@ func TestGetPodContainerID(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -410,7 +411,7 @@ func TestGetPodContainerID(t *testing.T) {
|
|||
Sandboxes: []*kubecontainer.Container{sandbox},
|
||||
}
|
||||
actual, err := m.GetPodContainerID(expectedPod)
|
||||
assert.Equal(t, fakeSandbox.GetId(), actual.ID)
|
||||
assert.Equal(t, fakeSandbox.Id, actual.ID)
|
||||
}
|
||||
|
||||
func TestGetNetNS(t *testing.T) {
|
||||
|
@ -418,7 +419,7 @@ func TestGetNetNS(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -440,7 +441,7 @@ func TestGetNetNS(t *testing.T) {
|
|||
// Set fake sandbox and fake containers to fakeRuntime.
|
||||
sandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)
|
||||
|
||||
actual, err := m.GetNetNS(kubecontainer.ContainerID{ID: sandbox.GetId()})
|
||||
actual, err := m.GetNetNS(kubecontainer.ContainerID{ID: sandbox.Id})
|
||||
assert.Equal(t, "", actual)
|
||||
assert.Equal(t, "not supported", err.Error())
|
||||
}
|
||||
|
@ -450,7 +451,7 @@ func TestKillPod(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -497,7 +498,7 @@ func TestKillPod(t *testing.T) {
|
|||
Sandboxes: []*kubecontainer.Container{
|
||||
{
|
||||
ID: kubecontainer.ContainerID{
|
||||
ID: fakeSandbox.GetId(),
|
||||
ID: fakeSandbox.Id,
|
||||
Type: apitest.FakeRuntimeName,
|
||||
},
|
||||
},
|
||||
|
@ -509,10 +510,10 @@ func TestKillPod(t *testing.T) {
|
|||
assert.Equal(t, 2, len(fakeRuntime.Containers))
|
||||
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
|
||||
for _, sandbox := range fakeRuntime.Sandboxes {
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState())
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
|
||||
}
|
||||
for _, c := range fakeRuntime.Containers {
|
||||
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.GetState())
|
||||
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -533,7 +534,7 @@ func TestSyncPod(t *testing.T) {
|
|||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -550,10 +551,10 @@ func TestSyncPod(t *testing.T) {
|
|||
assert.Equal(t, 2, len(fakeImage.Images))
|
||||
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
|
||||
for _, sandbox := range fakeRuntime.Sandboxes {
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
|
||||
}
|
||||
for _, c := range fakeRuntime.Containers {
|
||||
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.GetState())
|
||||
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -564,7 +565,7 @@ func TestPruneInitContainers(t *testing.T) {
|
|||
init1 := makeTestContainer("init1", "busybox")
|
||||
init2 := makeTestContainer("init2", "busybox")
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -588,7 +589,7 @@ func TestPruneInitContainers(t *testing.T) {
|
|||
|
||||
keep := map[kubecontainer.ContainerID]int{}
|
||||
m.pruneInitContainersBeforeStart(pod, podStatus, keep)
|
||||
expectedContainers := []string{fakes[0].GetId(), fakes[2].GetId()}
|
||||
expectedContainers := []string{fakes[0].Id, fakes[2].Id}
|
||||
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
|
||||
t.Errorf("expected %q, got %q", expectedContainers, actual)
|
||||
}
|
||||
|
@ -618,7 +619,7 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
|||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
|
@ -634,11 +635,11 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
|||
buildContainerID := func(pod *v1.Pod, container v1.Container) string {
|
||||
uid := string(pod.UID)
|
||||
sandboxID := apitest.BuildSandboxName(&runtimeapi.PodSandboxMetadata{
|
||||
Name: &pod.Name,
|
||||
Uid: &uid,
|
||||
Namespace: &pod.Namespace,
|
||||
Name: pod.Name,
|
||||
Uid: uid,
|
||||
Namespace: pod.Namespace,
|
||||
})
|
||||
return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: &container.Name}, sandboxID)
|
||||
return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: container.Name}, sandboxID)
|
||||
}
|
||||
|
||||
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
|
||||
|
|
74
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
generated
vendored
74
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
generated
vendored
|
@ -41,7 +41,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
|
|||
}
|
||||
|
||||
// Create pod logs directory
|
||||
err = m.osInterface.MkdirAll(podSandboxConfig.GetLogDirectory(), 0755)
|
||||
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
|
||||
glog.Errorf(message)
|
||||
|
@ -65,10 +65,10 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
|
|||
podUID := string(pod.UID)
|
||||
podSandboxConfig := &runtimeapi.PodSandboxConfig{
|
||||
Metadata: &runtimeapi.PodSandboxMetadata{
|
||||
Name: &pod.Name,
|
||||
Namespace: &pod.Namespace,
|
||||
Uid: &podUID,
|
||||
Attempt: &attempt,
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
Uid: podUID,
|
||||
Attempt: attempt,
|
||||
},
|
||||
Labels: newPodLabels(pod),
|
||||
Annotations: newPodAnnotations(pod),
|
||||
|
@ -89,11 +89,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
podSandboxConfig.Hostname = &hostname
|
||||
podSandboxConfig.Hostname = hostname
|
||||
}
|
||||
|
||||
logDir := buildPodLogsDirectory(pod.UID)
|
||||
podSandboxConfig.LogDirectory = &logDir
|
||||
podSandboxConfig.LogDirectory = logDir
|
||||
|
||||
cgroupParent := ""
|
||||
portMappings := []*runtimeapi.PortMapping{}
|
||||
|
@ -110,10 +110,10 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
|
|||
containerPort := int32(port.ContainerPort)
|
||||
protocol := toRuntimeProtocol(port.Protocol)
|
||||
portMappings = append(portMappings, &runtimeapi.PortMapping{
|
||||
HostIp: &port.HostIP,
|
||||
HostPort: &hostPort,
|
||||
ContainerPort: &containerPort,
|
||||
Protocol: &protocol,
|
||||
HostIp: port.HostIP,
|
||||
HostPort: hostPort,
|
||||
ContainerPort: containerPort,
|
||||
Protocol: protocol,
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -131,20 +131,21 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
 // generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
 func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeapi.LinuxPodSandboxConfig {
 	lc := &runtimeapi.LinuxPodSandboxConfig{
-		SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{},
-	}
-
-	if cgroupParent != "" {
-		lc.CgroupParent = &cgroupParent
+		CgroupParent: cgroupParent,
+		SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
+			Privileged: kubecontainer.HasPrivilegedContainer(pod),
+		},
 	}

 	if pod.Spec.SecurityContext != nil {
 		sc := pod.Spec.SecurityContext
-		lc.SecurityContext.RunAsUser = sc.RunAsUser
+		if sc.RunAsUser != nil {
+			lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: *sc.RunAsUser}
+		}
 		lc.SecurityContext.NamespaceOptions = &runtimeapi.NamespaceOption{
-			HostNetwork: &pod.Spec.HostNetwork,
-			HostIpc:     &pod.Spec.HostIPC,
-			HostPid:     &pod.Spec.HostPID,
+			HostNetwork: pod.Spec.HostNetwork,
+			HostIpc:     pod.Spec.HostIPC,
+			HostPid:     pod.Spec.HostPID,
 		}

 		if sc.FSGroup != nil {
@@ -158,19 +159,14 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
 		}
 		if sc.SELinuxOptions != nil {
 			lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
-				User:  &sc.SELinuxOptions.User,
-				Role:  &sc.SELinuxOptions.Role,
-				Type:  &sc.SELinuxOptions.Type,
-				Level: &sc.SELinuxOptions.Level,
+				User:  sc.SELinuxOptions.User,
+				Role:  sc.SELinuxOptions.Role,
+				Type:  sc.SELinuxOptions.Type,
+				Level: sc.SELinuxOptions.Level,
 			}
 		}
 	}

-	if kubecontainer.HasPrivilegedContainer(pod) {
-		privileged := true
-		lc.SecurityContext.Privileged = &privileged
-	}
-
 	return lc
 }

@@ -180,7 +176,9 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
 	if !all {
 		readyState := runtimeapi.PodSandboxState_SANDBOX_READY
 		filter = &runtimeapi.PodSandboxFilter{
-			State: &readyState,
+			State: &runtimeapi.PodSandboxStateValue{
+				State: readyState,
+			},
 		}
 	}

@@ -194,7 +192,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
 	for _, s := range resp {
 		if !isManagedByKubelet(s.Labels) {
 			glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
-				s.Metadata.GetName(), s.Metadata.GetNamespace()))
+				s.Metadata.Name, s.Metadata.Namespace))
 			continue
 		}

@@ -210,7 +208,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
 		glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
 		return ""
 	}
-	ip := podSandbox.Network.GetIp()
+	ip := podSandbox.Network.Ip
 	if net.ParseIP(ip) == nil {
 		glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
 		return ""
@@ -222,9 +220,13 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
 // Param state could be nil in order to get all sandboxes belonging to same pod.
 func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
 	filter := &runtimeapi.PodSandboxFilter{
-		State:         state,
 		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
 	}
+	if state != nil {
+		filter.State = &runtimeapi.PodSandboxStateValue{
+			State: *state,
+		}
+	}
 	sandboxes, err := m.runtimeService.ListPodSandbox(filter)
 	if err != nil {
 		glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
@@ -239,7 +241,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
 	sandboxIDs := make([]string, len(sandboxes))
 	sort.Sort(podSandboxByCreated(sandboxes))
 	for i, s := range sandboxes {
-		sandboxIDs[i] = s.GetId()
+		sandboxIDs[i] = s.Id
 	}

 	return sandboxIDs, nil
@@ -256,11 +258,11 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
 	}
 	// TODO: Port is unused for now, but we may need it in the future.
 	req := &runtimeapi.PortForwardRequest{
-		PodSandboxId: &sandboxIDs[0],
+		PodSandboxId: sandboxIDs[0],
 	}
 	resp, err := m.runtimeService.PortForward(req)
 	if err != nil {
 		return nil, err
 	}
-	return url.Parse(resp.GetUrl())
+	return url.Parse(resp.Url)
 }
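Beyond the mechanical pointer-to-value swaps, the getSandboxIDByPodUID hunk above changes how the sandbox filter is built: State is now a *PodSandboxStateValue wrapper and is only populated when the caller actually supplied a state. A minimal sketch of that pattern with simplified stand-in types follows; the real types live in the vendored runtimeapi package, and the label key literal below is assumed to be the value of types.KubernetesPodUIDLabel:

package main

import "fmt"

// Stand-ins for runtimeapi.PodSandboxState, PodSandboxStateValue and
// PodSandboxFilter; illustrative only, not the vendored generated types.
type PodSandboxState int32

const PodSandboxState_SANDBOX_READY PodSandboxState = 0

type PodSandboxStateValue struct {
	State PodSandboxState
}

type PodSandboxFilter struct {
	State         *PodSandboxStateValue
	LabelSelector map[string]string
}

// buildFilter mirrors the updated kubelet logic: leave State nil to list all
// sandboxes for the pod, or wrap the requested state in PodSandboxStateValue.
func buildFilter(podUID string, state *PodSandboxState) *PodSandboxFilter {
	filter := &PodSandboxFilter{
		// "io.kubernetes.pod.uid" is assumed here as the value of
		// types.KubernetesPodUIDLabel.
		LabelSelector: map[string]string{"io.kubernetes.pod.uid": podUID},
	}
	if state != nil {
		filter.State = &PodSandboxStateValue{State: *state}
	}
	return filter
}

func main() {
	ready := PodSandboxState_SANDBOX_READY
	fmt.Println(buildFilter("12345678", &ready).State) // &{0}
	fmt.Println(buildFilter("12345678", nil).State)    // <nil>
}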
5 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go generated vendored
@@ -22,6 +22,7 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@@ -31,7 +32,7 @@ import (
 func TestCreatePodSandbox(t *testing.T) {
 	fakeRuntime, _, m, err := createTestRuntimeManager()
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			UID:       "12345678",
 			Name:      "bar",
 			Namespace: "new",
@@ -57,7 +58,7 @@ func TestCreatePodSandbox(t *testing.T) {
 	id, _, err := m.createPodSandbox(pod, 1)
 	assert.NoError(t, err)
 	fakeRuntime.AssertCalls([]string{"RunPodSandbox"})
-	sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: &id})
+	sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id})
 	assert.NoError(t, err)
 	assert.Equal(t, len(sandboxes), 1)
 	// TODO Check pod sandbox configuration
Some files were not shown because too many files have changed in this diff.