diff --git a/lock.json b/lock.json index f2edc74d..f26a1908 100644 --- a/lock.json +++ b/lock.json @@ -1,5 +1,5 @@ { - "memo": "c0b6d8274a7286341387dd0a060d5f1c171bf8658cc76602c6e026880a951a08", + "memo": "88a7bc044db73c7ab2adc009d3780db88e39e36cbb96e0f1f4b0636929481543", "projects": [ { "name": "cloud.google.com/go", @@ -176,7 +176,8 @@ "revision": "48702e0da86bd25e76cfef347e2adeb434a0d0a6", "packages": [ "daemon", - "dbus" + "dbus", + "util" ] }, { @@ -184,6 +185,7 @@ "version": "v3", "revision": "3ac0863d7acf3bc44daf49afef8919af12f704ef", "packages": [ + "dlopen", "health", "httputil", "timeutil" @@ -523,10 +525,14 @@ "revision": "b263a43430ac6996a4302b891688544225197294", "packages": [ "libcontainer/apparmor", + "libcontainer/cgroups", + "libcontainer/cgroups/fs", + "libcontainer/cgroups/systemd", "libcontainer/configs", "libcontainer/devices", "libcontainer/system", - "libcontainer/user" + "libcontainer/user", + "libcontainer/utils" ] }, { @@ -840,8 +846,8 @@ }, { "name": "k8s.io/kubernetes", - "version": "v1.6.1", - "revision": "b0b7a323cc5a4a2019b2e9520c21c7830b7f708e", + "version": "v1.6.4", + "revision": "d6f433224538d4f9ca2f7ae19b252e6fcb66a3ae", "packages": [ "pkg/api", "pkg/api/install", diff --git a/manifest.json b/manifest.json index 7c14f89a..8290f0e7 100644 --- a/manifest.json +++ b/manifest.json @@ -9,9 +9,6 @@ "github.com/containers/storage": { "branch": "master" }, - "github.com/opencontainers/image-spec": { - "version": "v1.0.0-rc5" - }, "github.com/docker/distribution": { "branch": "master" }, @@ -21,6 +18,9 @@ "github.com/godbus/dbus": { "version": "^4.0.0" }, + "github.com/opencontainers/image-spec": { + "version": "v1.0.0-rc5" + }, "github.com/opencontainers/runc": { "branch": "master" }, @@ -36,11 +36,11 @@ "google.golang.org/grpc": { "version": "v1.0.1-GA" }, - "k8s.io/kubernetes": { - "version": "v1.6.1" - }, "k8s.io/apimachinery": { "revision": "ae33df8bd0294deb6f1853cc107816dd181e0146" + }, + "k8s.io/kubernetes": { + "version": "~v1.6.1" } } } diff --git a/vendor/k8s.io/kubernetes/WORKSPACE b/vendor/k8s.io/kubernetes/WORKSPACE index 30e0d72e..b048cfc1 100644 --- a/vendor/k8s.io/kubernetes/WORKSPACE +++ b/vendor/k8s.io/kubernetes/WORKSPACE @@ -24,33 +24,39 @@ go_repositories() debs = ( ( "busybox_deb", - "7465567f5e5255188b1d004d7081066cd79f77a5c18a5d418d27966d698e0bef", - "http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b2_amd64.deb", + "5f81f140777454e71b9e5bfdce9c89993de5ddf4a7295ea1cfda364f8f630947", + "http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b3_amd64.deb", + "https://storage.googleapis.com/kubernetes-release/debs/busybox-static_1.22.0-19+b3_amd64.deb", ), ( "libc_deb", - "6bbd506b171a9f29b09fde77e2749c0aa0c1439058df9d1a6408d464069b7dd6", - "http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-9_amd64.deb", + "372aac4a9ce9dbb26a08de0b9c41b0500ba019430295d29f39566483f5f32732", + "http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-10_amd64.deb", + "https://storage.googleapis.com/kubernetes-release/debs/libc6_2.24-10_amd64.deb", ), ( "iptables_deb", "7747388a97ba71fede302d70361c81d486770a2024185514c18b5d8eab6aaf4e", "http://ftp.us.debian.org/debian/pool/main/i/iptables/iptables_1.4.21-2+b1_amd64.deb", + "https://storage.googleapis.com/kubernetes-release/debs/iptables_1.4.21-2+b1_amd64.deb", ), ( "libnetlink_deb", "5d486022cd9e047e9afbb1617cf4519c0decfc3d2c1fad7e7fe5604943dbbf37", "http://ftp.us.debian.org/debian/pool/main/libn/libnfnetlink/libnfnetlink0_1.0.1-3_amd64.deb", + 
"https://storage.googleapis.com/kubernetes-release/debs/libnfnetlink0_1.0.1-3_amd64.deb", ), ( "libxtables_deb", "6783f316af4cbf3ada8b9a2b7bb5f53a87c0c2575c1903ce371fdbd45d3626c6", "http://ftp.us.debian.org/debian/pool/main/i/iptables/libxtables10_1.4.21-2+b1_amd64.deb", + "https://storage.googleapis.com/kubernetes-release/debs/libxtables10_1.4.21-2+b1_amd64.deb", ), ( "iproute2_deb", "3ce9cb1d03a2a1359cbdd4f863b15d0c906096bf713e8eb688149da2f4e350bc", "http://ftp.us.debian.org/debian/pool/main/i/iproute2/iproute_3.16.0-2_all.deb", + "https://storage.googleapis.com/kubernetes-release/debs/iproute_3.16.0-2_all.deb", ), ) @@ -58,7 +64,7 @@ debs = ( name = name, sha256 = sha256, url = url, -) for name, sha256, url in debs] +) for name, sha256, origin, url in debs] http_file( name = "kubernetes_cni", diff --git a/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json b/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json index 28170ddb..c4fe99f5 100644 --- a/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json +++ b/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Kubernetes", - "version": "v1.6.1" + "version": "v1.6.4" }, "paths": { "/api/": { @@ -43262,7 +43262,7 @@ ], "properties": { "concurrencyPolicy": { - "description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.", "type": "string" }, "failedJobsHistoryLimit": { diff --git a/vendor/k8s.io/kubernetes/cluster/addons/dns/kubedns-sa.yaml b/vendor/k8s.io/kubernetes/cluster/addons/dns/kubedns-sa.yaml index b7524758..7455b2e2 100644 --- a/vendor/k8s.io/kubernetes/cluster/addons/dns/kubedns-sa.yaml +++ b/vendor/k8s.io/kubernetes/cluster/addons/dns/kubedns-sa.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns + namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile diff --git a/vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml b/vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml index 98acff37..fc6537df 100644 --- a/vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml +++ b/vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml @@ -79,9 +79,6 @@ data: type counter name logging_line_count desc Total number of lines generated by application containers - - tag ${tag} - @@ -342,9 +339,8 @@ data: type counter name logging_entry_count - desc Total number of log entries generated by either an application container or a system component + desc Total number of log entries generated by either application containers or system components - tag ${tag} component container @@ -376,9 +372,8 @@ data: type counter name logging_entry_count - desc Total number of log entries generated by either an application container or a system component + desc Total number of log entries generated by either application containers or system components - tag ${tag} component system diff --git a/vendor/k8s.io/kubernetes/cluster/common.sh b/vendor/k8s.io/kubernetes/cluster/common.sh index 900b6f86..b97c96ef 100755 --- a/vendor/k8s.io/kubernetes/cluster/common.sh +++ b/vendor/k8s.io/kubernetes/cluster/common.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2015 The Kubernetes Authors. +# Copyright 2017 The Kubernetes Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -461,8 +461,8 @@ function find-release-tars() { # This tarball is used by GCI, Ubuntu Trusty, and Container Linux. KUBE_MANIFESTS_TAR= - if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" ]] || \ - [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" ]] ; then + if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ + [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz) fi } @@ -598,7 +598,9 @@ function build-kube-env { local salt_tar_url=$SALT_TAR_URL local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}" if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \ - [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] ; then + [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \ + [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then # TODO: Support fallback .tar.gz settings on Container Linux server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}") salt_tar_url=$(split_csv "${SALT_TAR_URL}") @@ -682,8 +684,8 @@ EOF TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD}) EOF fi - if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") ]] || \ - [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") ]] ; then + if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then cat >>$file <&2 exit 1 fi -if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then +if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh" else echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2 @@ -592,8 +592,8 @@ function kube-up() { parse-master-env create-nodes elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then - if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then - echo "Master replication supported only for gci and debian" + if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" && 
"${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then + echo "Master replication supported only for gci, debian, and ubuntu" return 1 fi create-loadbalancer diff --git a/vendor/k8s.io/kubernetes/cluster/gke/config-test.sh b/vendor/k8s.io/kubernetes/cluster/gke/config-test.sh index fea85e3f..e25d0a2a 100644 --- a/vendor/k8s.io/kubernetes/cluster/gke/config-test.sh +++ b/vendor/k8s.io/kubernetes/cluster/gke/config-test.sh @@ -19,6 +19,7 @@ CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}" NETWORK=${KUBE_GKE_NETWORK:-e2e} NODE_TAG="k8s-${CLUSTER_NAME}-node" IMAGE_TYPE="${KUBE_GKE_IMAGE_TYPE:-container_vm}" +ENABLE_KUBERNETES_ALPHA="${KUBE_GKE_ENABLE_KUBERNETES_ALPHA:-}" KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true} diff --git a/vendor/k8s.io/kubernetes/cluster/gke/util.sh b/vendor/k8s.io/kubernetes/cluster/gke/util.sh index ecdd36bd..ccbb75bc 100755 --- a/vendor/k8s.io/kubernetes/cluster/gke/util.sh +++ b/vendor/k8s.io/kubernetes/cluster/gke/util.sh @@ -135,6 +135,7 @@ function validate-cluster { # HEAPSTER_MACHINE_TYPE (optional) # CLUSTER_IP_RANGE (optional) # GKE_CREATE_FLAGS (optional, space delineated) +# ENABLE_KUBERNETES_ALPHA (optional) function kube-up() { echo "... in gke:kube-up()" >&2 detect-project >&2 @@ -184,6 +185,10 @@ function kube-up() { "--machine-type=${MACHINE_TYPE}" ) + if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then + create_args+=("--enable-kubernetes-alpha") + fi + if [[ ! -z "${ADDITIONAL_ZONES:-}" ]]; then create_args+=("--additional-zones=${ADDITIONAL_ZONES}") fi diff --git a/vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest b/vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest index bdbe020d..6fe0506f 100644 --- a/vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest +++ b/vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest @@ -25,12 +25,12 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "gcr.io/google_containers/cluster-autoscaler:v0.5.1", + "image": "gcr.io/google_containers/cluster-autoscaler:v0.5.4", "command": [ "./run.sh", "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f", "--v=4", - "--stderrthreshold=info", + "--logtostderr=true", "--write-status-configmap=true", "{{params}}" ], diff --git a/vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest b/vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest index b2760a90..b8a1f1a0 100644 --- a/vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest +++ b/vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest @@ -1,18 +1,18 @@ apiVersion: v1 kind: Pod metadata: - name: l7-lb-controller-v0.9.2 + name: l7-lb-controller-v0.9.3 namespace: kube-system labels: k8s-app: glbc - version: v0.9.2 + version: v0.9.3 kubernetes.io/cluster-service: "true" kubernetes.io/name: "GLBC" spec: terminationGracePeriodSeconds: 600 hostNetwork: true containers: - - image: gcr.io/google_containers/glbc:0.9.2 + - image: gcr.io/google_containers/glbc:0.9.3 livenessProbe: httpGet: path: /healthz @@ -43,7 +43,7 @@ spec: # TODO: split this out into args when we no longer need to pipe stdout to a file #6428 - sh - -c - - '/glbc --verbose=true --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' + - '/glbc --verbose=true --apiserver-host=http://localhost:8080 
--default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' volumes: - hostPath: path: /etc/gce.conf diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD index 6f7d1617..c7b9ca68 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD @@ -31,9 +31,11 @@ go_library( "//pkg/kubeapiserver:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//pkg/kubeapiserver/authenticator:go_default_library", + "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/master:go_default_library", "//pkg/master/tunneler:go_default_library", "//pkg/registry/cachesize:go_default_library", + "//pkg/registry/rbac/rest:go_default_library", "//pkg/version:go_default_library", "//plugin/pkg/admission/admit:go_default_library", "//plugin/pkg/admission/alwayspullimages:go_default_library", diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index 15926581..4392488e 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -63,9 +63,11 @@ import ( "k8s.io/kubernetes/pkg/kubeapiserver" kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator" + "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/master/tunneler" "k8s.io/kubernetes/pkg/registry/cachesize" + rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap" ) @@ -324,6 +326,9 @@ func BuildMasterConfig(s *options.ServerRunOptions) (*master.Config, informers.S if err != nil { return nil, nil, fmt.Errorf("invalid authentication config: %v", err) } + if !sets.NewString(s.Authorization.Modes()...).Has(modes.ModeRBAC) { + genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName) + } authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers) apiAuthorizer, err := authorizationConfig.New() diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index c7d878cb..4ea69723 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -26,14 +26,12 @@ import ( const ( DefaultServiceDNSDomain = "cluster.local" DefaultServicesSubnet = "10.96.0.0/12" - DefaultKubernetesVersion = "latest-1.6" - // This is only for clusters without internet, were the latest stable version can't be determined - DefaultKubernetesFallbackVersion = "v1.6.0" - DefaultAPIBindPort = 6443 - DefaultDiscoveryBindPort = 9898 - DefaultAuthorizationMode = "RBAC" - DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" - DefaultCertificatesDir = "/etc/kubernetes/pki" + DefaultKubernetesVersion = "stable-1.6" + DefaultAPIBindPort = 6443 + DefaultDiscoveryBindPort = 9898 + DefaultAuthorizationMode = "RBAC" + DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" + DefaultCertificatesDir = "/etc/kubernetes/pki" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -46,7 +44,7 @@ func 
addDefaultingFuncs(scheme *runtime.Scheme) error { func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { if obj.KubernetesVersion == "" { - obj.KubernetesVersion = DefaultKubernetesFallbackVersion + obj.KubernetesVersion = DefaultKubernetesVersion } if obj.API.BindPort == 0 { diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go index 49fce929..c503809c 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go @@ -22,7 +22,6 @@ import ( netutil "k8s.io/apimachinery/pkg/util/net" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token" @@ -45,11 +44,7 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { // Validate version argument ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion) if err != nil { - if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion { - return err - } else { - ver = kubeadmapiext.DefaultKubernetesFallbackVersion - } + return err } cfg.KubernetesVersion = ver diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go index c84970a2..64933ffb 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go @@ -105,16 +105,16 @@ func (r *Reset) Run(out io.Writer) error { } dockerCheck := preflight.ServiceCheck{Service: "docker", CheckIfActive: true} - if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 { + if _, errors := dockerCheck.Check(); len(errors) == 0 { fmt.Println("[reset] Removing kubernetes-managed containers") - if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil { + if err := exec.Command("sh", "-c", "docker ps -a --filter name=k8s_ -q | xargs -r docker rm --force --volumes").Run(); err != nil { fmt.Println("[reset] Failed to stop the running containers") } } else { fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers") } - dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"} + dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim"} // Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user // provided external etcd endpoints. 
In that case, it is his own responsibility to reset etcd diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go index 37ec2b43..e05e3e02 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go @@ -61,7 +61,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error // The master node is tainted and labelled accordingly n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = "" - n.Spec.Taints = []v1.Taint{{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}} + n.Spec.Taints = append(n.Spec.Taints, v1.Taint{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}) newData, err := json.Marshal(n) if err != nil { diff --git a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md index 912b6e00..bd388b52 100644 --- a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md +++ b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md @@ -61,9 +61,11 @@ metadata: provisioner: kubernetes.io/vsphere-volume parameters: diskformat: eagerzeroedthick + fstype: ext3 ``` * `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`. +* `fstype`: fstype that are supported by kubernetes. Default: `"ext4"`. #### Portworx Volume diff --git a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md index bffe6bfe..01fc250d 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md @@ -5,6 +5,8 @@ - [Volumes](#volumes) - [Persistent Volumes](#persistent-volumes) - [Storage Class](#storage-class) + - [Virtual SAN policy support inside Kubernetes](#virtual-san-policy-support-inside-kubernetes) + - [Stateful Set](#stateful-set) ## Prerequisites @@ -212,6 +214,7 @@ provisioner: kubernetes.io/vsphere-volume parameters: diskformat: zeroedthick + fstype: ext3 ``` [Download example](vsphere-volume-sc-fast.yaml?raw=true) @@ -243,11 +246,12 @@ ``` bash $ kubectl describe storageclass fast - Name: fast - Annotations: - Provisioner: kubernetes.io/vsphere-volume - Parameters: diskformat=zeroedthick - No events. + Name: fast + IsDefaultClass: No + Annotations: + Provisioner: kubernetes.io/vsphere-volume + Parameters: diskformat=zeroedthick,fstype=ext3 + No events. ``` 2. Create Persistent Volume Claim. @@ -281,14 +285,19 @@ ``` bash $ kubectl describe pvc pvcsc001 - Name: pvcsc001 - Namespace: default - Status: Bound - Volume: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Labels: - Capacity: 2Gi - Access Modes: RWO - No events. + Name: pvcsc001 + Namespace: default + StorageClass: fast + Status: Bound + Volume: pvc-83295256-f8e0-11e6-8263-005056b2349c + Labels: + Capacity: 2Gi + Access Modes: RWO + Events: + FirstSeen LastSeen Count From SubObjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 1m 1m 1 persistentvolume-controller Normal ProvisioningSucceeded Successfully provisioned volume pvc-83295256-f8e0-11e6-8263-005056b2349c using kubernetes.io/vsphere-volume + ``` Persistent Volume is automatically created and is bounded to this pvc. 
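
    For reference, the class and claim behind the `describe` output above can be written out as follows — a minimal sketch assembled from snippets already in this README (the `fast` class with the new `fstype` parameter, and a claim bound to it through the beta storage-class annotation; the combination into one file is illustrative):

    ```yaml
    kind: StorageClass
    apiVersion: storage.k8s.io/v1beta1
    metadata:
      name: fast
    provisioner: kubernetes.io/vsphere-volume
    parameters:
      diskformat: zeroedthick
      fstype: ext3        # new parameter; the README states ext4 is used when omitted
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: pvcsc001
      annotations:
        volume.beta.kubernetes.io/storage-class: fast
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 2Gi
    ```

    Creating both with `kubectl create -f` should reproduce the `Bound` claim shown above.
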
@@ -296,19 +305,20 @@ Verifying persistent volume claim is created: ``` bash - $ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Name: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d - Labels: - Status: Bound - Claim: default/pvcsc001 - Reclaim Policy: Delete - Access Modes: RWO - Capacity: 2Gi + $ kubectl describe pv pvc-83295256-f8e0-11e6-8263-005056b2349c + Name: pvc-83295256-f8e0-11e6-8263-005056b2349c + Labels: + StorageClass: fast + Status: Bound + Claim: default/pvcsc001 + Reclaim Policy: Delete + Access Modes: RWO + Capacity: 2Gi Message: Source: - Type: vSphereVolume (a Persistent Disk resource in vSphere) - VolumePath: [datastore1] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk - FSType: ext4 + Type: vSphereVolume (a Persistent Disk resource in vSphere) + VolumePath: [datastore1] kubevols/kubernetes-dynamic-pvc-83295256-f8e0-11e6-8263-005056b2349c.vmdk + FSType: ext3 No events. ``` @@ -353,6 +363,257 @@ pvpod 1/1 Running 0 48m ``` +### Virtual SAN policy support inside Kubernetes + + Vsphere Infrastructure(VI) Admins will have the ability to specify custom Virtual SAN Storage Capabilities during dynamic volume provisioning. You can now define storage requirements, such as performance and availability, in the form of storage capabilities during dynamic volume provisioning. The storage capability requirements are converted into a Virtual SAN policy which are then pushed down to the Virtual SAN layer when a persistent volume (virtual disk) is being created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements. + + The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN. The user can specify these storage capabilities as part of storage class defintion based on his application needs. + + The policy settings can be one or more of the following: + + * *hostFailuresToTolerate*: represents NumberOfFailuresToTolerate + * *diskStripes*: represents NumberofDiskStripesPerObject + * *objectSpaceReservation*: represents ObjectSpaceReservation + * *cacheReservation*: represents FlashReadCacheReservation + * *iopsLimit*: represents IOPSLimitForObject + * *forceProvisioning*: represents if volume must be Force Provisioned + + __Note: Here you don't need to create persistent volume it is created for you.__ + 1. Create Storage Class. + + Example 1: + + ```yaml + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: fast + provisioner: kubernetes.io/vsphere-volume + parameters: + diskformat: zeroedthick + hostFailuresToTolerate: "2" + cachereservation: "20" + ``` + [Download example](vsphere-volume-sc-vsancapabilities.yaml?raw=true) + + Here a persistent volume will be created with the Virtual SAN capabilities - hostFailuresToTolerate to 2 and cachereservation is 20% read cache reserved for storage object. Also the persistent volume will be *zeroedthick* disk. + The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN and can be configured on the virtual disk. + + You can also specify the datastore in the Storageclass as shown in example 2. 
The volume will be created on the datastore specified in the storage class. + This field is optional. If not specified as shown in example 1, the volume will be created on the datastore specified in the vsphere config file used to initialize the vSphere Cloud Provider. + + Example 2: + + ```yaml + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: fast + provisioner: kubernetes.io/vsphere-volume + parameters: + diskformat: zeroedthick + datastore: VSANDatastore + hostFailuresToTolerate: "2" + cachereservation: "20" + ``` + + [Download example](vsphere-volume-sc-vsancapabilities-with-datastore.yaml?raw=true) + + __Note: If you do not apply a storage policy during dynamic provisioning on a VSAN datastore, it will use a default Virtual SAN policy.__ + + Creating the storageclass: + + ``` bash + $ kubectl create -f examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml + ``` + + Verifying storage class is created: + + ``` bash + $ kubectl describe storageclass fast + Name: fast + Annotations: + Provisioner: kubernetes.io/vsphere-volume + Parameters: diskformat=zeroedthick, hostFailuresToTolerate="2", cachereservation="20" + No events. + ``` + + 2. Create Persistent Volume Claim. + + See example: + + ```yaml + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: pvcsc-vsan + annotations: + volume.beta.kubernetes.io/storage-class: fast + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + ``` + + [Download example](vsphere-volume-pvcsc.yaml?raw=true) + + Creating the persistent volume claim: + + ``` bash + $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcsc.yaml + ``` + + Verifying persistent volume claim is created: + + ``` bash + $ kubectl describe pvc pvcsc-vsan + Name: pvcsc-vsan + Namespace: default + Status: Bound + Volume: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d + Labels: + Capacity: 2Gi + Access Modes: RWO + No events. + ``` + + Persistent Volume is automatically created and is bounded to this pvc. + + Verifying persistent volume claim is created: + + ``` bash + $ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d + Name: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d + Labels: + Status: Bound + Claim: default/pvcsc-vsan + Reclaim Policy: Delete + Access Modes: RWO + Capacity: 2Gi + Message: + Source: + Type: vSphereVolume (a Persistent Disk resource in vSphere) + VolumePath: [VSANDatastore] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk + FSType: ext4 + No events. + ``` + + __Note: VMDK is created inside ```kubevols``` folder in datastore which is mentioned in 'vsphere' cloudprovider configuration. + The cloudprovider config is created during setup of Kubernetes cluster on vSphere.__ + + 3. Create Pod which uses Persistent Volume Claim with storage class. + + See example: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: pvpod + spec: + containers: + - name: test-container + image: gcr.io/google_containers/test-webserver + volumeMounts: + - name: test-volume + mountPath: /test + volumes: + - name: test-volume + persistentVolumeClaim: + claimName: pvcsc-vsan + ``` + + [Download example](vsphere-volume-pvcscpod.yaml?raw=true) + + Creating the pod: + + ``` bash + $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml + ``` + + Verifying pod is created: + + ``` bash + $ kubectl get pod pvpod + NAME READY STATUS RESTARTS AGE + pvpod 1/1 Running 0 48m + ``` + +### Stateful Set + +vSphere volumes can be consumed by Stateful Sets. + + 1. 
Create a storage class that will be used by the ```volumeClaimTemplates``` of a Stateful Set. + + See example: + + ```yaml + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: thin-disk + provisioner: kubernetes.io/vsphere-volume + parameters: + diskformat: thin + ``` + + [Download example](simple-storageclass.yaml) + + 2. Create a Stateful set that consumes storage from the Storage Class created. + + See example: + ```yaml + --- + apiVersion: v1 + kind: Service + metadata: + name: nginx + labels: + app: nginx + spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx + --- + apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: web + spec: + serviceName: "nginx" + replicas: 14 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: gcr.io/google_containers/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + annotations: + volume.beta.kubernetes.io/storage-class: thin-disk + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + ``` + This will create Persistent Volume Claims for each replica and provision a volume for each claim if an existing volume could be bound to the claim. + [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/vsphere/README.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-fast.yaml b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-fast.yaml index eac5049d..b2b436f8 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-fast.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-fast.yaml @@ -4,4 +4,5 @@ metadata: name: fast provisioner: kubernetes.io/vsphere-volume parameters: - diskformat: zeroedthick \ No newline at end of file + diskformat: zeroedthick + fstype: ext3 \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities-with-datastore.yaml b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities-with-datastore.yaml new file mode 100644 index 00000000..2fd93166 --- /dev/null +++ b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities-with-datastore.yaml @@ -0,0 +1,10 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: fast +provisioner: kubernetes.io/vsphere-volume +parameters: + diskformat: zeroedthick + datastore: vsanDatastore + hostFailuresToTolerate: "2" + cachereservation: "20" diff --git a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml new file mode 100644 index 00000000..ad2ff9d1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml @@ -0,0 +1,9 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: fast +provisioner: kubernetes.io/vsphere-volume +parameters: + diskformat: zeroedthick + hostFailuresToTolerate: "2" + cachereservation: "20" diff --git a/vendor/k8s.io/kubernetes/federation/apis/openapi-spec/swagger.json b/vendor/k8s.io/kubernetes/federation/apis/openapi-spec/swagger.json index d2f65f2c..87ccc834 100644 --- a/vendor/k8s.io/kubernetes/federation/apis/openapi-spec/swagger.json +++ 
b/vendor/k8s.io/kubernetes/federation/apis/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Generic API Server", - "version": "v1.6.1" + "version": "v1.6.4" }, "paths": { "/api/": { diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/configmap/configmap_controller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/configmap/configmap_controller.go index bde25a39..3c9182be 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/configmap/configmap_controller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/configmap/configmap_controller.go @@ -169,7 +169,8 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont }, func(client kubeclientset.Interface, obj pkgruntime.Object) error { configmap := obj.(*apiv1.ConfigMap) - err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -202,13 +203,13 @@ func (configmapcontroller *ConfigMapController) hasFinalizerFunc(obj pkgruntime. return false } -// removeFinalizerFunc removes the finalizer from the given objects ObjectMeta. Assumes that the given object is a configmap. -func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { +// removeFinalizerFunc removes the given finalizers from the given objects ObjectMeta. Assumes that the given object is a configmap. +func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) { configmap := obj.(*apiv1.ConfigMap) newFinalizers := []string{} hasFinalizer := false for i := range configmap.ObjectMeta.Finalizers { - if string(configmap.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, configmap.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, configmap.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -221,7 +222,7 @@ func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgrunti configmap.ObjectMeta.Finalizers = newFinalizers configmap, err := configmapcontroller.federatedApiClient.Core().ConfigMaps(configmap.Namespace).Update(configmap) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from configmap %s: %v", finalizer, configmap.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from configmap %s: %v", finalizers, configmap.Name, err) } return configmap, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/daemonset/daemonset_controller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/daemonset/daemonset_controller.go index 5d2bb7e1..79fd5ca1 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/daemonset/daemonset_controller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/daemonset/daemonset_controller.go @@ -182,7 +182,8 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont func(client kubeclientset.Interface, obj pkgruntime.Object) error { daemonset := obj.(*extensionsv1.DaemonSet) glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name) - err := 
client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) if err != nil { glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err) } else { @@ -220,14 +221,14 @@ func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime. return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a daemonset. -func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { +func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) { daemonset := obj.(*extensionsv1.DaemonSet) newFinalizers := []string{} hasFinalizer := false for i := range daemonset.ObjectMeta.Finalizers { - if string(daemonset.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, daemonset.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, daemonset.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -240,7 +241,7 @@ func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgrunti daemonset.ObjectMeta.Finalizers = newFinalizers daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from daemonset %s: %v", finalizer, daemonset.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from daemonset %s: %v", finalizers, daemonset.Name, err) } return daemonset, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/deployment/deploymentcontroller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/deployment/deploymentcontroller.go index 0493811e..328708b1 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/deployment/deploymentcontroller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/deployment/deploymentcontroller.go @@ -201,7 +201,8 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen }, func(client kubeclientset.Interface, obj runtime.Object) error { rs := obj.(*extensionsv1.Deployment) - err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -234,14 +235,14 @@ func (fdc *DeploymentController) hasFinalizerFunc(obj runtime.Object, finalizer return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a deployment. 
-func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) { +func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) { deployment := obj.(*extensionsv1.Deployment) newFinalizers := []string{} hasFinalizer := false for i := range deployment.ObjectMeta.Finalizers { - if string(deployment.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, deployment.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, deployment.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -254,7 +255,7 @@ func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finaliz deployment.ObjectMeta.Finalizers = newFinalizers deployment, err := fdc.fedClient.Extensions().Deployments(deployment.Namespace).Update(deployment) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from deployment %s: %v", finalizer, deployment.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from deployment %s: %v", finalizers, deployment.Name, err) } return deployment, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/ingress/ingress_controller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/ingress/ingress_controller.go index d22a0185..74e727b0 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/ingress/ingress_controller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/ingress/ingress_controller.go @@ -255,7 +255,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll func(client kubeclientset.Interface, obj pkgruntime.Object) error { ingress := obj.(*extensionsv1beta1.Ingress) glog.V(4).Infof("Attempting to delete Ingress: %v", ingress) - err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -316,14 +317,14 @@ func (ic *IngressController) hasFinalizerFunc(obj pkgruntime.Object, finalizer s return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a ingress. 
-func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { +func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) { ingress := obj.(*extensionsv1beta1.Ingress) newFinalizers := []string{} hasFinalizer := false for i := range ingress.ObjectMeta.Finalizers { - if string(ingress.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, ingress.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, ingress.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -336,7 +337,7 @@ func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalize ingress.ObjectMeta.Finalizers = newFinalizers ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from ingress %s: %v", finalizer, ingress.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from ingress %s: %v", finalizers, ingress.Name, err) } return ingress, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/namespace/namespace_controller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/namespace/namespace_controller.go index 300e4779..9732a215 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/namespace/namespace_controller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/namespace/namespace_controller.go @@ -169,7 +169,8 @@ func NewNamespaceController(client federationclientset.Interface, dynamicClientP }, func(client kubeclientset.Interface, obj runtime.Object) error { namespace := obj.(*apiv1.Namespace) - err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) // IsNotFound error is fine since that means the object is deleted already. if errors.IsNotFound(err) { return nil @@ -210,14 +211,14 @@ func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer st return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a namespace. 
-func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) { +func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) { namespace := obj.(*apiv1.Namespace) newFinalizers := []string{} hasFinalizer := false for i := range namespace.ObjectMeta.Finalizers { - if string(namespace.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, namespace.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, namespace.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -230,7 +231,7 @@ func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer namespace.ObjectMeta.Finalizers = newFinalizers namespace, err := nc.federatedApiClient.Core().Namespaces().Update(namespace) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from namespace %s: %v", finalizer, namespace.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from namespace %s: %v", finalizers, namespace.Name, err) } return namespace, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/replicasetcontroller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/replicasetcontroller.go index e29787fe..715c3613 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/replicasetcontroller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/replicasetcontroller.go @@ -209,7 +209,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe }, func(client kubeclientset.Interface, obj runtime.Object) error { rs := obj.(*extensionsv1.ReplicaSet) - err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -242,14 +243,14 @@ func (frsc *ReplicaSetController) hasFinalizerFunc(obj runtime.Object, finalizer return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a replicaset. 
-func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) { +func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) { replicaset := obj.(*extensionsv1.ReplicaSet) newFinalizers := []string{} hasFinalizer := false for i := range replicaset.ObjectMeta.Finalizers { - if string(replicaset.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, replicaset.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, replicaset.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -262,7 +263,7 @@ func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finali replicaset.ObjectMeta.Finalizers = newFinalizers replicaset, err := frsc.fedClient.Extensions().ReplicaSets(replicaset.Namespace).Update(replicaset) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from replicaset %s: %v", finalizer, replicaset.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from replicaset %s: %v", finalizers, replicaset.Name, err) } return replicaset, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/secret/secret_controller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/secret/secret_controller.go index a6a80e1d..c1aff44c 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/secret/secret_controller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/secret/secret_controller.go @@ -168,7 +168,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController }, func(client kubeclientset.Interface, obj pkgruntime.Object) error { secret := obj.(*apiv1.Secret) - err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -201,14 +202,14 @@ func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a secret. 
-func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { +func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) { secret := obj.(*apiv1.Secret) newFinalizers := []string{} hasFinalizer := false for i := range secret.ObjectMeta.Finalizers { - if string(secret.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, secret.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, secret.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -221,7 +222,7 @@ func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Obj secret.ObjectMeta.Finalizers = newFinalizers secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from secret %s: %v", finalizer, secret.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from secret %s: %v", finalizers, secret.Name, err) } return secret, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go index 78132ee0..02a33b8e 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go @@ -291,7 +291,8 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface, }, func(client kubeclientset.Interface, obj pkgruntime.Object) error { svc := obj.(*v1.Service) - err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}) + orphanDependents := false + err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) return err }) @@ -328,14 +329,14 @@ func (s *ServiceController) hasFinalizerFunc(obj pkgruntime.Object, finalizer st return false } -// Removes the finalizer from the given objects ObjectMeta. +// Removes the finalizers from the given objects ObjectMeta. // Assumes that the given object is a service. 
-func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { +func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) { service := obj.(*v1.Service) newFinalizers := []string{} hasFinalizer := false for i := range service.ObjectMeta.Finalizers { - if string(service.ObjectMeta.Finalizers[i]) != finalizer { + if !deletionhelper.ContainsString(finalizers, service.ObjectMeta.Finalizers[i]) { newFinalizers = append(newFinalizers, service.ObjectMeta.Finalizers[i]) } else { hasFinalizer = true @@ -348,7 +349,7 @@ func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer service.ObjectMeta.Finalizers = newFinalizers service, err := s.federationClient.Core().Services(service.Namespace).Update(service) if err != nil { - return nil, fmt.Errorf("failed to remove finalizer %s from service %s: %v", finalizer, service.Name, err) + return nil, fmt.Errorf("failed to remove finalizers %v from service %s: %v", finalizers, service.Name, err) } return service, nil } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/BUILD b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/BUILD index 06ed9ea9..63c47bf9 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/BUILD +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/BUILD @@ -9,7 +9,10 @@ load( go_library( name = "go_default_library", - srcs = ["deletion_helper.go"], + srcs = [ + "deletion_helper.go", + "util.go", + ], tags = ["automanaged"], deps = [ "//federation/pkg/federation-controller/util:go_default_library", diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/deletion_helper.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/deletion_helper.go index 06adf833..62cdba85 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/deletion_helper.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/deletion_helper.go @@ -45,7 +45,7 @@ const ( ) type HasFinalizerFunc func(runtime.Object, string) bool -type RemoveFinalizerFunc func(runtime.Object, string) (runtime.Object, error) +type RemoveFinalizerFunc func(runtime.Object, []string) (runtime.Object, error) type AddFinalizerFunc func(runtime.Object, []string) (runtime.Object, error) type ObjNameFunc func(runtime.Object) string @@ -123,11 +123,8 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) ( // If the obj has FinalizerOrphan finalizer, then we need to orphan the // corresponding objects in underlying clusters. // Just remove both the finalizers in that case. - obj, err := dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) - if err != nil { - return obj, err - } - return dh.removeFinalizerFunc(obj, metav1.FinalizerOrphanDependents) + finalizers := []string{FinalizerDeleteFromUnderlyingClusters, metav1.FinalizerOrphanDependents} + return dh.removeFinalizerFunc(obj, finalizers) } glog.V(2).Infof("Deleting obj %s from underlying clusters", objName) @@ -183,5 +180,5 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) ( } // All done. Just remove the finalizer. 
- return dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) + return dh.removeFinalizerFunc(obj, []string{FinalizerDeleteFromUnderlyingClusters}) } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/util.go b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/util.go new file mode 100644 index 00000000..e4b6f5f1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/util.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deletionhelper + +// ContainsString returns true if the given string slice contains the given string. +// Returns false otherwise. +func ContainsString(arr []string, s string) bool { + for i := range arr { + if arr[i] == s { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init.go b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init.go index cb81da64..0f30be84 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init.go @@ -922,7 +922,7 @@ func marshallOverrides(overrideArgString string) (map[string]string, error) { argsMap := make(map[string]string) overrideArgs := strings.Split(overrideArgString, ",") for _, overrideArg := range overrideArgs { - splitArg := strings.Split(overrideArg, "=") + splitArg := strings.SplitN(overrideArg, "=", 2) if len(splitArg) != 2 { return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg) } diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go index 17f0959a..ba1a9650 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go @@ -322,8 +322,10 @@ func TestMarshallAndMergeOverrides(t *testing.T) { expectedErr: "wrong format for override arg: wrong-format-arg", }, { - overrideParams: "wrong-format-arg=override=wrong-format-arg=override", - expectedErr: "wrong format for override arg: wrong-format-arg=override=wrong-format-arg=override", + // TODO: Multiple arg values separated by , are not supported yet + overrideParams: "multiple-equalto-char=first-key=1", + expectedSet: sets.NewString("arg2=val2", "arg1=val1", "multiple-equalto-char=first-key=1"), + expectedErr: "", }, { overrideParams: "=wrong-format-only-value", diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/unjoin.go b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/unjoin.go index 25d173be..bb40694a 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/unjoin.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/unjoin.go @@ -194,7 +194,8 @@ func deleteConfigMapFromCluster(hostClientset internalclientset.Interface, secre // deleteSecret deletes the secret with the given name from the host // cluster. 
func deleteSecret(clientset internalclientset.Interface, name, namespace string) error { - return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{}) + orphanDependents := false + return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}) } // isNotFound checks if the given error is a NotFound status error. diff --git a/vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh b/vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh index 8c1e31dd..430cfdf6 100755 --- a/vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh +++ b/vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh @@ -36,10 +36,14 @@ function run_kube_apiserver() { # Admission Controllers to invoke prior to persisting objects in cluster ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota" + # Include RBAC (to exercise bootstrapping), and AlwaysAllow to allow all actions + AUTHORIZATION_MODE="RBAC,AlwaysAllow" + "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \ --address="127.0.0.1" \ --public-address-override="127.0.0.1" \ --port="${API_PORT}" \ + --authorization-mode="${AUTHORIZATION_MODE}" \ --admission-control="${ADMISSION_CONTROL}" \ --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \ --public-address-override="127.0.0.1" \ diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go index 83495de0..e2002c7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go @@ -276,10 +276,10 @@ const ( AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" ) -// Tries to add a toleration to annotations list. Returns true if something was updated -// false otherwise. -func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { - podTolerations := pod.Spec.Tolerations +// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPodSpec(spec *PodSpec, toleration *Toleration) (bool, error) { + podTolerations := spec.Tolerations var newTolerations []Toleration updated := false @@ -300,10 +300,16 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) newTolerations = append(newTolerations, *toleration) } - pod.Spec.Tolerations = newTolerations + spec.Tolerations = newTolerations return true, nil } +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) +} + // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , // if the two tolerations have same combination, regard as they match. // TODO: uniqueness check for tolerations in api validations. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go index 2113dca1..d26ecf8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go @@ -233,6 +233,7 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. 
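The pkg/api/v1/helpers.go refactor above moves the toleration-merging logic onto PodSpec, keeping AddOrUpdateTolerationInPod as a thin wrapper, so callers that only hold a pod template can reuse it; the DaemonSet controller change later in this patch does exactly that. A sketch of such a call, assuming the k8s.io/kubernetes/pkg/api/v1 package from this tree; the toleration key is a placeholder, not one of the node-controller taints used elsewhere in the patch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	spec := v1.PodSpec{}

	// Merge a toleration directly into a bare PodSpec, no Pod wrapper needed.
	updated, err := v1.AddOrUpdateTolerationInPodSpec(&spec, &v1.Toleration{
		Key:      "example.com/maintenance",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	})
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(updated, len(spec.Tolerations)) // true 1
}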
// +optional ConcurrencyPolicy ConcurrencyPolicy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go index c300a72f..1dda456d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go @@ -41,7 +41,7 @@ func TestSetDefaultCronJob(t *testing.T) { }, }, }, - "nothing should be defaulted": { + "set fields should not be defaulted": { original: &CronJob{ Spec: CronJobSpec{ ConcurrencyPolicy: ForbidConcurrent, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto index 4f51d616..605dafe3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto @@ -72,6 +72,7 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. // +optional optional string concurrencyPolicy = 3; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go index 3c1fdf22..e22e32f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go @@ -94,6 +94,7 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go index c0b53b8e..dc4ff64c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.", "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. 
This is a pointer to distinguish between explicit zero and not specified.", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go index 75caea06..62c27ccf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go @@ -20,8 +20,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // IsDefaultStorageClassAnnotation represents a StorageClass annotation that // marks a class as the default StorageClass -//TODO: Update IsDefaultStorageClassannotation and remove Beta when no longer used -const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" +//TODO: remove Beta when no longer used +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" // IsDefaultAnnotationText returns a pretty Yes/No String if diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index 8d52f0c9..296eeb3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -392,6 +392,10 @@ type CloudConfig struct { // on a different aws account, on a different cloud provider or on-premise. // If the flag is set also the KubernetesClusterTag must be provided VPC string + // SubnetID enables using a specific subnet to use for ELB's + SubnetID string + // RouteTableID enables using a specific RouteTable + RouteTableID string // KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources KubernetesClusterTag string @@ -817,13 +821,14 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { deviceAllocators: make(map[types.NodeName]DeviceAllocator), } - if cfg.Global.VPC != "" && cfg.Global.KubernetesClusterTag != "" { + if cfg.Global.VPC != "" && cfg.Global.SubnetID != "" && (cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "") { // When the master is running on a different AWS account, cloud provider or on-premise // build up a dummy instance and use the VPC from the nodes account - glog.Info("Master is configured to run on a AWS account, different cloud provider or on-premise") + glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premise") awsCloud.selfAWSInstance = &awsInstance{ nodeName: "master-dummy", vpcID: cfg.Global.VPC, + subnetID: cfg.Global.SubnetID, } awsCloud.vpcID = cfg.Global.VPC } else { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go index 4246849e..fffcdf42 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go @@ -29,17 +29,27 @@ func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) { // This should be unnecessary (we already filter on TagNameKubernetesCluster, // and something is broken if cluster name doesn't match, but anyway... 
// TODO: All clouds should be cluster-aware by default - request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)} - - response, err := c.ec2.DescribeRouteTables(request) - if err != nil { - return nil, err - } - var tables []*ec2.RouteTable - for _, table := range response { - if c.tagging.hasClusterTag(table.Tags) { - tables = append(tables, table) + + if c.cfg.Global.RouteTableID != "" { + request := &ec2.DescribeRouteTablesInput{Filters: []*ec2.Filter{newEc2Filter("route-table-id", c.cfg.Global.RouteTableID)}} + response, err := c.ec2.DescribeRouteTables(request) + if err != nil { + return nil, err + } + + tables = response + } else { + request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)} + response, err := c.ec2.DescribeRouteTables(request) + if err != nil { + return nil, err + } + + for _, table := range response { + if c.tagging.hasClusterTag(table.Tags) { + tables = append(tables, table) + } } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD index 53b3631e..1b7f6b46 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/api/v1:go_default_library", "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", + "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//vendor:github.com/Azure/azure-sdk-for-go/arm/compute", "//vendor:github.com/Azure/azure-sdk-for-go/arm/network", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go index cc03b5d9..7e2f016e 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go @@ -17,14 +17,17 @@ limitations under the License. 
package azure import ( + "fmt" "io" "io/ioutil" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/version" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" "time" @@ -125,38 +128,54 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID) az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint az.SubnetsClient.Authorizer = servicePrincipalToken + az.SubnetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.SubnetsClient.Client) az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.RouteTablesClient.Authorizer = servicePrincipalToken + az.RouteTablesClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.RouteTablesClient.Client) az.RoutesClient = network.NewRoutesClient(az.SubscriptionID) az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.RoutesClient.Authorizer = servicePrincipalToken + az.RoutesClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.RoutesClient.Client) az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID) az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.InterfacesClient.Authorizer = servicePrincipalToken + az.InterfacesClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.InterfacesClient.Client) az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID) az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint az.LoadBalancerClient.Authorizer = servicePrincipalToken + az.LoadBalancerClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.LoadBalancerClient.Client) az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID) az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.VirtualMachinesClient.Authorizer = servicePrincipalToken az.VirtualMachinesClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.VirtualMachinesClient.Client) az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID) az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.PublicIPAddressesClient.Authorizer = servicePrincipalToken + az.PublicIPAddressesClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.PublicIPAddressesClient.Client) az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID) az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint az.SecurityGroupsClient.Authorizer = servicePrincipalToken + az.SecurityGroupsClient.PollingDelay = 5 * time.Second + configureUserAgent(&az.SecurityGroupsClient.Client) az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) az.StorageAccountClient.Authorizer = servicePrincipalToken + return &az, nil } @@ -194,3 +213,8 @@ func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []stri func (az *Cloud) ProviderName() string { return CloudProviderName } + +func configureUserAgent(client *autorest.Client) { + k8sVersion := version.Get().GitVersion + client.UserAgent = fmt.Sprintf("%s; %s", client.UserAgent, k8sVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go 
b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go index e6f6bd3a..2e246aa5 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -24,9 +24,11 @@ import ( "net/url" "path" "path/filepath" + "regexp" "runtime" "strings" "sync" + "time" "gopkg.in/gcfg.v1" @@ -49,23 +51,33 @@ import ( ) const ( - ProviderName = "vsphere" - ActivePowerState = "poweredOn" - SCSIControllerType = "scsi" - LSILogicControllerType = "lsiLogic" - BusLogicControllerType = "busLogic" - PVSCSIControllerType = "pvscsi" - LSILogicSASControllerType = "lsiLogic-sas" - SCSIControllerLimit = 4 - SCSIControllerDeviceLimit = 15 - SCSIDeviceSlots = 16 - SCSIReservedSlot = 7 - ThinDiskType = "thin" - PreallocatedDiskType = "preallocated" - EagerZeroedThickDiskType = "eagerZeroedThick" - ZeroedThickDiskType = "zeroedThick" - VolDir = "kubevols" - RoundTripperDefaultCount = 3 + ProviderName = "vsphere" + ActivePowerState = "poweredOn" + SCSIControllerType = "scsi" + LSILogicControllerType = "lsiLogic" + BusLogicControllerType = "busLogic" + PVSCSIControllerType = "pvscsi" + LSILogicSASControllerType = "lsiLogic-sas" + SCSIControllerLimit = 4 + SCSIControllerDeviceLimit = 15 + SCSIDeviceSlots = 16 + SCSIReservedSlot = 7 + ThinDiskType = "thin" + PreallocatedDiskType = "preallocated" + EagerZeroedThickDiskType = "eagerZeroedThick" + ZeroedThickDiskType = "zeroedThick" + VolDir = "kubevols" + RoundTripperDefaultCount = 3 + DummyVMPrefixName = "vsphere-k8s" + VSANDatastoreType = "vsan" + MAC_OUI_VC = "00:50:56" + MAC_OUI_ESX = "00:0c:29" + DiskNotFoundErrMsg = "No vSphere disk ID found" + NoDiskUUIDFoundErrMsg = "No disk UUID found" + NoDevicesFoundErrMsg = "No devices found" + NonSupportedControllerTypeErrMsg = "Disk is attached to non-supported controller type" + FileAlreadyExistErrMsg = "File requested already exist" + CleanUpDummyVMRoutine_Interval = 5 ) // Controller types that are currently supported for hot attach of disks @@ -85,14 +97,17 @@ var diskFormatValidType = map[string]string{ } var DiskformatValidOptions = generateDiskFormatValidOptions() +var cleanUpRoutineInitialized = false -var ErrNoDiskUUIDFound = errors.New("No disk UUID found") -var ErrNoDiskIDFound = errors.New("No vSphere disk ID found") -var ErrNoDevicesFound = errors.New("No devices found") -var ErrNonSupportedControllerType = errors.New("Disk is attached to non-supported controller type") -var ErrFileAlreadyExist = errors.New("File requested already exist") +var ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg) +var ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg) +var ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg) +var ErrNonSupportedControllerType = errors.New(NonSupportedControllerTypeErrMsg) +var ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg) var clientLock sync.Mutex +var cleanUpRoutineInitLock sync.Mutex +var cleanUpDummyVMLock sync.RWMutex // VSphere is an implementation of cloud provider Interface for VSphere. type VSphere struct { @@ -166,11 +181,12 @@ type Volumes interface { // VolumeOptions specifies capacity, tags, name and diskFormat for a volume. 
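The vSphere constants above replace the old inline error strings with named ...ErrMsg values; the exported Err... sentinels are still built from the same messages, and later in this patch CreateVolume matches on err.Error() == DiskNotFoundErrMsg rather than on the sentinel itself. A reduced sketch of that dual matching, with lower-cased placeholder names standing in for the package's identifiers:

package main

import (
	"errors"
	"fmt"
)

// The message lives in a constant so callers can match either the sentinel value
// or the error text, mirroring the ...ErrMsg constants in the hunk above.
const diskNotFoundErrMsg = "No vSphere disk ID found"

var errNoDiskIDFound = errors.New(diskNotFoundErrMsg)

func detach() error { return errNoDiskIDFound }

func main() {
	err := detach()
	fmt.Println(err == errNoDiskIDFound)           // true: identity comparison still works
	fmt.Println(err.Error() == diskNotFoundErrMsg) // true: message comparison, as CreateVolume does
}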
type VolumeOptions struct { - CapacityKB int - Tags map[string]string - Name string - DiskFormat string - Datastore string + CapacityKB int + Tags map[string]string + Name string + DiskFormat string + Datastore string + StorageProfileData string } // Generates Valid Options for Diskformat @@ -687,6 +703,8 @@ func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualD // Attaches given virtual disk volume to the compute running kubelet. func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { + var newSCSIController types.BaseVirtualDevice + // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -722,50 +740,24 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di var diskControllerType = vs.cfg.Disk.SCSIControllerType // find SCSI controller of particular type from VM devices - allSCSIControllers := getSCSIControllers(vmDevices) scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) scsiController := getAvailableSCSIController(scsiControllersOfRequiredType) - - var newSCSICreated = false - var newSCSIController types.BaseVirtualDevice - - // creating a scsi controller as there is none found of controller type defined + newSCSICreated := false if scsiController == nil { - if len(allSCSIControllers) >= SCSIControllerLimit { - // we reached the maximum number of controllers we can attach - return "", "", fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) - } - glog.V(1).Infof("Creating a SCSI controller of %v type", diskControllerType) - newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) + newSCSIController, err = createAndAttachSCSIControllerToVM(ctx, vm, diskControllerType) if err != nil { - k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err)) - return "", "", err - } - configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() - hotAndRemove := true - configNewSCSIController.HotAddRemove = &hotAndRemove - configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing) - - // add the scsi controller to virtual machine - err = vm.AddDevice(context.TODO(), newSCSIController) - if err != nil { - glog.V(1).Infof("cannot add SCSI controller to vm - %v", err) - // attempt clean up of scsi controller - if vmDevices, err := vm.Device(ctx); err == nil { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } + glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.Name(), err) return "", "", err } // verify scsi controller in virtual machine - vmDevices, err = vm.Device(ctx) + vmDevices, err := vm.Device(ctx) if err != nil { - // cannot cleanup if there is no device list return "", "", err } // Get VM device list - _, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) + _, vmDevices, _, err = getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) if err != nil { glog.Errorf("cannot get vmDevices for VM err=%s", err) return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err) @@ -798,7 +790,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di glog.Errorf("Failed while searching for datastore %+q. 
err %s", datastorePathObj.Datastore, err) return "", "", err } - + vmDiskPath = removeClusterFromVDiskPath(vmDiskPath) disk := vmDevices.CreateDisk(scsiController, ds.Reference(), vmDiskPath) unitNumber, err := getNextUnitNumber(vmDevices, scsiController) if err != nil { @@ -1045,6 +1037,7 @@ func checkDiskAttached(volPath string, vmdevices object.VirtualDeviceList, dc *o // Returns the object key that denotes the controller object to which vmdk is attached. func getVirtualDiskControllerKey(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (int32, error) { + volPath = removeClusterFromVDiskPath(volPath) volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client) if err != nil { @@ -1175,7 +1168,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error if err != nil { return err } - + volPath = removeClusterFromVDiskPath(volPath) diskID, err := getVirtualDiskID(volPath, vmDevices, dc, vs.client) if err != nil { glog.Warningf("disk ID not found for %v ", volPath) @@ -1200,8 +1193,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error // CreateVolume creates a volume of given size (in KiB). func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) { - var diskFormat string var datastore string + var destVolPath string // Default datastore is the datastore in the vSphere config file that is used initialize vSphere cloud provider. if volumeOptions.Datastore == "" { @@ -1220,8 +1213,6 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string " Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions) } - diskFormat = diskFormatValidType[volumeOptions.DiskFormat] - // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1246,43 +1237,105 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string return "", err } - // vmdks will be created inside kubevols directory - kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" - err = makeDirectoryInDatastore(vs.client, dc, kubeVolsPath, false) - if err != nil && err != ErrFileAlreadyExist { - glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) - return "", err - } - glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) + // Create a disk with the VSAN storage capabilities specified in the volumeOptions.StorageProfileData. + // This is achieved by following steps: + // 1. Create dummy VM if not already present. + // 2. Add a new disk to the VM by performing VM reconfigure. + // 3. Detach the new disk from the dummy VM. + // 4. Delete the dummy VM. + if volumeOptions.StorageProfileData != "" { + // Check if the datastore is VSAN if any capability requirements are specified. + // VSphere cloud provider now only supports VSAN capabilities requirements + ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds) + if err != nil { + return "", fmt.Errorf("Failed while determining whether the datastore: %q"+ + " is VSAN or not.", datastore) + } + if !ok { + return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ + " The policy parameters will work only with VSAN Datastore."+ + " So, please specify a valid VSAN datastore in Storage class definition.", datastore) + } - vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" + // Acquire a read lock to ensure multiple PVC requests can be processed simultaneously. 
+ cleanUpDummyVMLock.RLock() + defer cleanUpDummyVMLock.RUnlock() - // Create a virtual disk manager - virtualDiskManager := object.NewVirtualDiskManager(vs.client.Client) + // Create a new background routine that will delete any dummy VM's that are left stale. + // This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime. + cleanUpRoutineInitLock.Lock() + if !cleanUpRoutineInitialized { + go vs.cleanUpDummyVMs(DummyVMPrefixName) + cleanUpRoutineInitialized = true + } + cleanUpRoutineInitLock.Unlock() - // Create specification for new virtual disk - vmDiskSpec := &types.FileBackedVirtualDiskSpec{ - VirtualDiskSpec: types.VirtualDiskSpec{ - AdapterType: LSILogicControllerType, - DiskType: diskFormat, - }, - CapacityKb: int64(volumeOptions.CapacityKB), + // Check if the VM exists in kubernetes cluster folder. + // The kubernetes cluster folder - vs.cfg.Global.WorkingDir is where all the nodes in the kubernetes cluster are created. + dummyVMFullName := DummyVMPrefixName + "-" + volumeOptions.Name + vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName + dummyVM, err := f.VirtualMachine(ctx, vmRegex) + if err != nil { + // 1. Create a dummy VM and return the VM reference. + dummyVM, err = vs.createDummyVM(ctx, dc, ds, dummyVMFullName) + if err != nil { + return "", err + } + } + + // 2. Reconfigure the VM to attach the disk with the VSAN policy configured. + vmDiskPath, err := vs.createVirtualDiskWithPolicy(ctx, dc, ds, dummyVM, volumeOptions) + fileAlreadyExist := false + if err != nil { + vmDiskPath = filepath.Clean(ds.Path(VolDir)) + "/" + volumeOptions.Name + ".vmdk" + errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", vmDiskPath) + if errorMessage == err.Error() { + //Skip error and continue to detach the disk as the disk was already created on the datastore. + fileAlreadyExist = true + glog.V(1).Infof("File: %v already exists", vmDiskPath) + } else { + glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) + return "", err + } + } + + dummyVMNodeName := vmNameToNodeName(dummyVMFullName) + // 3. Detach the disk from the dummy VM. + err = vs.DetachDisk(vmDiskPath, dummyVMNodeName) + if err != nil { + if DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { + // Skip error if disk was already detached from the dummy VM but still present on the datastore. + glog.V(1).Infof("File: %v is already detached", vmDiskPath) + } else { + glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmDiskPath, dummyVMFullName, err) + return "", fmt.Errorf("Failed to create the volume: %q with err: %+v", volumeOptions.Name, err) + } + } + + // 4. Delete the dummy VM + err = deleteVM(ctx, dummyVM) + if err != nil { + return "", fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) + } + destVolPath = vmDiskPath + } else { + // Create a virtual disk directly if no VSAN storage capabilities are specified by the user. 
+ destVolPath, err = createVirtualDisk(ctx, vs.client, dc, ds, volumeOptions) + if err != nil { + return "", fmt.Errorf("Failed to create the virtual disk having name: %+q with err: %+v", destVolPath, err) + } } - // Create virtual disk - task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec) - if err != nil { - return "", err + if filepath.Base(datastore) != datastore { + // If Datastore is within cluster, add cluster path to the destVolPath + destVolPath = strings.Replace(destVolPath, filepath.Base(datastore), datastore, 1) } - err = task.Wait(ctx) - if err != nil { - return "", err - } - - return vmDiskPath, nil + glog.V(1).Infof("VM Disk path is %+q", destVolPath) + return destVolPath, nil } // DeleteVolume deletes a volume given volume name. +// Also, deletes the folder where the volume resides. func (vs *VSphere) DeleteVolume(vmDiskPath string) error { // Create context ctx, cancel := context.WithCancel(context.Background()) @@ -1308,7 +1361,24 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { if filepath.Ext(vmDiskPath) != ".vmdk" { vmDiskPath += ".vmdk" } + + // Get the vmDisk Name + diskNameWithExt := path.Base(vmDiskPath) + diskName := strings.TrimSuffix(diskNameWithExt, filepath.Ext(diskNameWithExt)) + + // Search for the dummyVM if present and delete it. + dummyVMFullName := DummyVMPrefixName + "-" + diskName + vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName + dummyVM, err := f.VirtualMachine(ctx, vmRegex) + if err == nil { + err = deleteVM(ctx, dummyVM) + if err != nil { + return fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) + } + } + // Delete virtual disk + vmDiskPath = removeClusterFromVDiskPath(vmDiskPath) task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc) if err != nil { return err @@ -1356,6 +1426,341 @@ func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bo return false, nil } +// A background routine which will be responsible for deleting stale dummy VM's. +func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for { + time.Sleep(CleanUpDummyVMRoutine_Interval * time.Minute) + // Ensure client is logged in and session is valid + err := vSphereLogin(ctx, vs) + if err != nil { + glog.V(4).Infof("[cleanUpDummyVMs] Unable to login to vSphere with err: %+v", err) + continue + } + + // Create a new finder + f := find.NewFinder(vs.client.Client, true) + + // Fetch and set data center + dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) + if err != nil { + glog.V(4).Infof("[cleanUpDummyVMs] Unable to fetch the datacenter: %q with err: %+v", vs.cfg.Global.Datacenter, err) + continue + } + f.SetDatacenter(dc) + + // Get the folder reference for global working directory where the dummy VM needs to be created. + vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir) + if err != nil { + glog.V(4).Infof("[cleanUpDummyVMs] Unable to get the kubernetes folder: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err) + continue + } + + // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. 
+ cleanUpDummyVMLock.Lock() + dummyVMRefList, err := getDummyVMList(ctx, vs.client, vmFolder, dummyVMPrefix) + if err != nil { + glog.V(4).Infof("[cleanUpDummyVMs] Unable to get dummy VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err) + cleanUpDummyVMLock.Unlock() + continue + } + for _, dummyVMRef := range dummyVMRefList { + err = deleteVM(ctx, dummyVMRef) + if err != nil { + glog.V(4).Infof("[cleanUpDummyVMs] Unable to delete dummy VM: %q with err: %+v", dummyVMRef.Name(), err) + continue + } + } + cleanUpDummyVMLock.Unlock() + } +} + +// Get the dummy VM list from the kubernetes working directory. +func getDummyVMList(ctx context.Context, c *govmomi.Client, vmFolder *object.Folder, dummyVMPrefix string) ([]*object.VirtualMachine, error) { + vmFolders, err := vmFolder.Children(ctx) + if err != nil { + glog.V(4).Infof("Unable to retrieve the virtual machines from the kubernetes cluster: %+v", vmFolder) + return nil, err + } + + var dummyVMRefList []*object.VirtualMachine + pc := property.DefaultCollector(c.Client) + for _, vmFolder := range vmFolders { + if vmFolder.Reference().Type == "VirtualMachine" { + var vmRefs []types.ManagedObjectReference + var vmMorefs []mo.VirtualMachine + vmRefs = append(vmRefs, vmFolder.Reference()) + err = pc.Retrieve(ctx, vmRefs, []string{"name"}, &vmMorefs) + if err != nil { + return nil, err + } + if strings.HasPrefix(vmMorefs[0].Name, dummyVMPrefix) { + dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(c.Client, vmRefs[0])) + } + } + } + return dummyVMRefList, nil +} + +func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, vmName string) (*object.VirtualMachine, error) { + // Create a virtual machine config spec with 1 SCSI adapter. + virtualMachineConfigSpec := types.VirtualMachineConfigSpec{ + Name: vmName, + Files: &types.VirtualMachineFileInfo{ + VmPathName: "[" + datastore.Name() + "]", + }, + NumCPUs: 1, + MemoryMB: 4, + DeviceChange: []types.BaseVirtualDeviceConfigSpec{ + &types.VirtualDeviceConfigSpec{ + Operation: types.VirtualDeviceConfigSpecOperationAdd, + Device: &types.ParaVirtualSCSIController{ + VirtualSCSIController: types.VirtualSCSIController{ + SharedBus: types.VirtualSCSISharingNoSharing, + VirtualController: types.VirtualController{ + BusNumber: 0, + VirtualDevice: types.VirtualDevice{ + Key: 1000, + }, + }, + }, + }, + }, + }, + } + + // Get the resource pool for current node. This is where dummy VM will be created. + resourcePool, err := vs.getCurrentNodeResourcePool(ctx, datacenter) + if err != nil { + return nil, err + } + + // Get the folder reference for global working directory where the dummy VM needs to be created. 
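The cleanUpDummyVMLock usage above and in CreateVolume follows a read/write-lock split: each provisioning request holds the read lock while its dummy VM must stay alive, so requests do not serialize against each other, while the periodic cleanup pass takes the write lock so it only deletes stale dummy VMs once no provisioning is in flight. A reduced stand-alone sketch of that coordination, with hypothetical names (provision, cleanupPass):

package main

import (
	"fmt"
	"sync"
	"time"
)

var dummyVMLock sync.RWMutex

// provision stands in for CreateVolume: many goroutines may hold the read lock at once.
func provision(id int, wg *sync.WaitGroup) {
	defer wg.Done()
	dummyVMLock.RLock()
	defer dummyVMLock.RUnlock()
	fmt.Printf("request %d: dummy VM in use\n", id)
	time.Sleep(10 * time.Millisecond)
}

// cleanupPass stands in for the background routine: the write lock waits out
// every in-flight provision before stale dummy VMs are removed.
func cleanupPass() {
	dummyVMLock.Lock()
	defer dummyVMLock.Unlock()
	fmt.Println("cleanup: deleting stale dummy VMs")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go provision(i, &wg)
	}
	wg.Wait()
	cleanupPass()
}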
+ vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir) + if err != nil { + return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err) + } + + task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil) + if err != nil { + return nil, err + } + + dummyVMTaskInfo, err := task.WaitForResult(ctx, nil) + if err != nil { + return nil, err + } + + vmRef := dummyVMTaskInfo.Result.(object.Reference) + dummyVM := object.NewVirtualMachine(vs.client.Client, vmRef.Reference()) + return dummyVM, nil +} + +func (vs *VSphere) getCurrentNodeResourcePool(ctx context.Context, datacenter *object.Datacenter) (*object.ResourcePool, error) { + // Create a new finder + f := find.NewFinder(vs.client.Client, true) + f.SetDatacenter(datacenter) + + vmRegex := vs.cfg.Global.WorkingDir + vs.localInstanceID + currentVM, err := f.VirtualMachine(ctx, vmRegex) + if err != nil { + return nil, err + } + + currentVMHost, err := currentVM.HostSystem(ctx) + if err != nil { + return nil, err + } + + // Get the resource pool for the current node. + // We create the dummy VM in the same resource pool as current node. + resourcePool, err := currentVMHost.ResourcePool(ctx) + if err != nil { + return nil, err + } + + return resourcePool, nil +} + +// Creates a virtual disk with the policy configured to the disk. +// A call to this function is made only when a user specifies VSAN storage capabilties in the storage class definition. +func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, virtualMachine *object.VirtualMachine, volumeOptions *VolumeOptions) (string, error) { + var diskFormat string + diskFormat = diskFormatValidType[volumeOptions.DiskFormat] + + vmDevices, err := virtualMachine.Device(ctx) + if err != nil { + return "", err + } + var diskControllerType = vs.cfg.Disk.SCSIControllerType + // find SCSI controller of particular type from VM devices + scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) + scsiController := scsiControllersOfRequiredType[0] + + kubeVolsPath := filepath.Clean(datastore.Path(VolDir)) + "/" + // Create a kubevols directory in the datastore if one doesn't exist. + err = makeDirectoryInDatastore(vs.client, datacenter, kubeVolsPath, false) + if err != nil && err != ErrFileAlreadyExist { + glog.Errorf("Cannot create dir %#v. 
err %s", kubeVolsPath, err) + return "", err + } + + glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) + + vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" + disk := vmDevices.CreateDisk(scsiController, datastore.Reference(), vmDiskPath) + unitNumber, err := getNextUnitNumber(vmDevices, scsiController) + if err != nil { + glog.Errorf("cannot attach disk to VM, limit reached - %v.", err) + return "", err + } + *disk.UnitNumber = unitNumber + disk.CapacityInKB = int64(volumeOptions.CapacityKB) + + backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) + backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent) + + switch diskFormat { + case ThinDiskType: + backing.ThinProvisioned = types.NewBool(true) + case EagerZeroedThickDiskType: + backing.EagerlyScrub = types.NewBool(true) + default: + backing.ThinProvisioned = types.NewBool(false) + } + + // Reconfigure VM + virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} + deviceConfigSpec := &types.VirtualDeviceConfigSpec{ + Device: disk, + Operation: types.VirtualDeviceConfigSpecOperationAdd, + FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate, + } + + storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{ + ProfileId: "", + ProfileData: &types.VirtualMachineProfileRawData{ + ExtensionKey: "com.vmware.vim.sps", + ObjectData: volumeOptions.StorageProfileData, + }, + } + + deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec) + virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) + task, err := virtualMachine.Reconfigure(ctx, virtualMachineConfigSpec) + if err != nil { + glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err) + return "", err + } + + err = task.Wait(ctx) + if err != nil { + glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err) + return "", err + } + + return vmDiskPath, nil +} + +// creating a scsi controller as there is none found. +func createAndAttachSCSIControllerToVM(ctx context.Context, vm *object.VirtualMachine, diskControllerType string) (types.BaseVirtualDevice, error) { + // Get VM device list + vmDevices, err := vm.Device(ctx) + if err != nil { + return nil, err + } + allSCSIControllers := getSCSIControllers(vmDevices) + if len(allSCSIControllers) >= SCSIControllerLimit { + // we reached the maximum number of controllers we can attach + return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) + } + newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) + if err != nil { + k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err)) + return nil, err + } + configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() + hotAndRemove := true + configNewSCSIController.HotAddRemove = &hotAndRemove + configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing) + + // add the scsi controller to virtual machine + err = vm.AddDevice(context.TODO(), newSCSIController) + if err != nil { + glog.V(1).Infof("cannot add SCSI controller to vm - %v", err) + // attempt clean up of scsi controller + if vmDevices, err := vm.Device(ctx); err == nil { + cleanUpController(ctx, newSCSIController, vmDevices, vm) + } + return nil, err + } + return newSCSIController, nil +} + +// Create a virtual disk. 
+func createVirtualDisk(ctx context.Context, c *govmomi.Client, dc *object.Datacenter, ds *object.Datastore, volumeOptions *VolumeOptions) (string, error) { + kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" + // Create a kubevols directory in the datastore if one doesn't exist. + err := makeDirectoryInDatastore(c, dc, kubeVolsPath, false) + if err != nil && err != ErrFileAlreadyExist { + glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) + return "", err + } + + glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) + vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" + + diskFormat := diskFormatValidType[volumeOptions.DiskFormat] + + // Create a virtual disk manager + virtualDiskManager := object.NewVirtualDiskManager(c.Client) + + // Create specification for new virtual disk + vmDiskSpec := &types.FileBackedVirtualDiskSpec{ + VirtualDiskSpec: types.VirtualDiskSpec{ + AdapterType: LSILogicControllerType, + DiskType: diskFormat, + }, + CapacityKb: int64(volumeOptions.CapacityKB), + } + + // Create virtual disk + task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec) + if err != nil { + return "", err + } + return vmDiskPath, task.Wait(ctx) +} + +// Check if the provided datastore is VSAN +func checkIfDatastoreTypeIsVSAN(c *govmomi.Client, datastore *object.Datastore) (bool, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pc := property.DefaultCollector(c.Client) + + // Convert datastores into list of references + var dsRefs []types.ManagedObjectReference + dsRefs = append(dsRefs, datastore.Reference()) + + // Retrieve summary property for the given datastore + var dsMorefs []mo.Datastore + err := pc.Retrieve(ctx, dsRefs, []string{"summary"}, &dsMorefs) + if err != nil { + return false, err + } + + for _, ds := range dsMorefs { + if ds.Summary.Type == VSANDatastoreType { + return true, nil + } + } + return false, nil +} + // Creates a folder using the specified name. // If the intermediate level folders do not exist, // and the parameter createParents is true, @@ -1378,3 +1783,70 @@ func makeDirectoryInDatastore(c *govmomi.Client, dc *object.Datacenter, path str return err } + +// Get the folder for a given VM +func getFolder(ctx context.Context, c *govmomi.Client, datacenterName string, folderName string) (*object.Folder, error) { + f := find.NewFinder(c.Client, true) + + // Fetch and set data center + dc, err := f.Datacenter(ctx, datacenterName) + if err != nil { + return nil, err + } + f.SetDatacenter(dc) + + folderName = strings.TrimSuffix(folderName, "/") + dcFolders, err := dc.Folders(ctx) + vmFolders, _ := dcFolders.VmFolder.Children(ctx) + + var vmFolderRefs []types.ManagedObjectReference + for _, vmFolder := range vmFolders { + vmFolderRefs = append(vmFolderRefs, vmFolder.Reference()) + } + + // Get only references of type folder. + var folderRefs []types.ManagedObjectReference + for _, vmFolder := range vmFolderRefs { + if vmFolder.Type == "Folder" { + folderRefs = append(folderRefs, vmFolder) + } + } + + // Find the specific folder reference matching the folder name. 
+ var resultFolder *object.Folder + pc := property.DefaultCollector(c.Client) + for _, folderRef := range folderRefs { + var refs []types.ManagedObjectReference + var folderMorefs []mo.Folder + refs = append(refs, folderRef) + err = pc.Retrieve(ctx, refs, []string{"name"}, &folderMorefs) + for _, fref := range folderMorefs { + if fref.Name == folderName { + resultFolder = object.NewFolder(c.Client, folderRef) + } + } + } + + return resultFolder, nil +} + +// Delete the VM. +func deleteVM(ctx context.Context, vm *object.VirtualMachine) error { + destroyTask, err := vm.Destroy(ctx) + if err != nil { + return err + } + return destroyTask.Wait(ctx) +} + +// Remove the cluster or folder path from the vDiskPath +// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk +// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk + +func removeClusterFromVDiskPath(vDiskPath string) string { + datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1] + if filepath.Base(datastore) != datastore { + vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1) + } + return vDiskPath +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go index 2b1d7102..6e8b102f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go @@ -27,10 +27,30 @@ import ( ) // GetPodTemplateWithHash returns copy of provided template with additional -// label which contains hash of provided template +// label which contains hash of provided template and sets default daemon tolerations. func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec { obj, _ := api.Scheme.DeepCopy(template) newTemplate := obj.(v1.PodTemplateSpec) + // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. + // Add infinite toleration for taint notReady:NoExecute here + // to survive taint-based eviction enforced by NodeController + // when node turns not ready. + v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{ + Key: metav1.TaintNodeNotReady, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + }) + + // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. + // Add infinite toleration for taint unreachable:NoExecute here + // to survive taint-based eviction enforced by NodeController + // when node turns unreachable. + v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{ + Key: metav1.TaintNodeUnreachable, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + }) + templateGenerationStr := fmt.Sprint(generation) newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( template.ObjectMeta.Labels, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go index 9523eb2e..d4d59522 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go @@ -52,7 +52,11 @@ import ( const ( // maxRetries is the number of times a deployment will be retried before it is dropped out of the queue. 
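Looking back at the removeClusterFromVDiskPath helper added earlier in this hunk: it takes the non-greedy bracketed capture as the datastore component and uses filepath.Base to drop a leading datastore-cluster prefix. A stand-alone check of that behavior on the two example paths quoted in its own comment; the function body is copied from the hunk above:

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

func removeClusterFromVDiskPath(vDiskPath string) string {
	datastore := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(vDiskPath)[1]
	if filepath.Base(datastore) != datastore {
		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
	}
	return vDiskPath
}

func main() {
	// Datastore-cluster prefix gets stripped.
	fmt.Println(removeClusterFromVDiskPath("[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// Plain datastore path is returned unchanged.
	fmt.Println(removeClusterFromVDiskPath("[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
}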
- maxRetries = 5 + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a deployment is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 ) // controllerKind contains the schema.GroupVersionKind for this controller type. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go index 68f9c900..8f889697 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go @@ -31,9 +31,9 @@ import ( deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" ) -func maxSurge(val int) *intstr.IntOrString { - surge := intstr.FromInt(val) - return &surge +func intOrStrP(val int) *intstr.IntOrString { + intOrStr := intstr.FromInt(val) + return &intOrStr } func TestScale(t *testing.T) { @@ -218,8 +218,8 @@ func TestScale(t *testing.T) { }, { name: "deployment with surge pods", - deployment: newDeployment("foo", 20, nil, maxSurge(2), nil, nil), - oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil), + deployment: newDeployment("foo", 20, nil, intOrStrP(2), nil, nil), + oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil), newRS: rs("foo-v2", 6, nil, newTimestamp), oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, @@ -229,8 +229,8 @@ func TestScale(t *testing.T) { }, { name: "change both surge and size", - deployment: newDeployment("foo", 50, nil, maxSurge(6), nil, nil), - oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil), + deployment: newDeployment("foo", 50, nil, intOrStrP(6), nil, nil), + oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil), newRS: rs("foo-v2", 5, nil, newTimestamp), oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, @@ -249,6 +249,21 @@ func TestScale(t *testing.T) { expectedNew: nil, expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, }, + { + name: "saturated but broken new replica set does not affect old pods", + deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), + oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), + + newRS: func() *extensions.ReplicaSet { + rs := rs("foo-v2", 2, nil, newTimestamp) + rs.Status.AvailableReplicas = 0 + return rs + }(), + oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + + expectedNew: rs("foo-v2", 2, nil, newTimestamp), + expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + }, } for _, test := range tests { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index ee7b3995..af13c593 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -1059,7 +1059,8 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size. // Both the deployment and the replica set have to believe this replica set can own all of the desired -// replicas in the deployment and the annotation helps in achieving that. 
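Per the new maxRetries comment above, the rate limiter in use backs off as 5ms * 2^(attempt-1), and after 15 attempts the deployment key is dropped. A quick way to reproduce the quoted delay sequence, ending at the ~82s figure:

package main

import (
	"fmt"
	"time"
)

func main() {
	base := 5 * time.Millisecond
	// Requeue delay for retry i is base * 2^(i-1); maxRetries = 15 caps the sequence.
	for i := 1; i <= 15; i++ {
		fmt.Printf("retry %2d: %v\n", i, base*time.Duration(1<<uint(i-1)))
	}
	// The final line prints 1m21.92s, i.e. the ~82s value quoted in the comment.
}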
+// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet +// need to be available. func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { if rs == nil { return false @@ -1069,7 +1070,9 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b if err != nil { return false } - return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas) + return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && + int32(desired) == *(deployment.Spec.Replicas) && + rs.Status.AvailableReplicas == *(deployment.Spec.Replicas) } // WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go index f88c0071..9127afbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go @@ -36,8 +36,9 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/util/node" + nodepkg "k8s.io/kubernetes/pkg/util/node" utilversion "k8s.io/kubernetes/pkg/util/version" "github.com/golang/glog" @@ -102,12 +103,12 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n // setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver, // and returns an error if it encounters one. func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) { - if pod.Status.Reason == node.NodeUnreachablePodReason { + if pod.Status.Reason == nodepkg.NodeUnreachablePodReason { return pod, nil } - pod.Status.Reason = node.NodeUnreachablePodReason - pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name) + pod.Status.Reason = nodepkg.NodeUnreachablePodReason + pod.Status.Message = fmt.Sprintf(nodepkg.NodeUnreachablePodMessage, nodeName, pod.Name) var updatedPod *v1.Pod var err error @@ -286,3 +287,32 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_st // and event is recorded or neither should happen, see issue #6055. 
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status) } + +// Returns true in case of success and false otherwise +func swapNodeControllerTaint(kubeClient clientset.Interface, taintToAdd, taintToRemove *v1.Taint, node *v1.Node) bool { + taintToAdd.TimeAdded = metav1.Now() + err := controller.AddOrUpdateTaintOnNode(kubeClient, node.Name, taintToAdd) + if err != nil { + utilruntime.HandleError( + fmt.Errorf( + "unable to taint %v unresponsive Node %q: %v", + taintToAdd.Key, + node.Name, + err)) + return false + } + glog.V(4).Infof("Added %v Taint to Node %v", taintToAdd, node.Name) + + err = controller.RemoveTaintOffNode(kubeClient, node.Name, taintToRemove, node) + if err != nil { + utilruntime.HandleError( + fmt.Errorf( + "unable to remove %v unneeded taint from unresponsive Node %q: %v", + taintToRemove.Key, + node.Name, + err)) + return false + } + glog.V(4).Infof("Made sure that Node %v has no %v Taint", node.Name, taintToRemove) + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go index 87b2de1e..0ca256b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go @@ -478,6 +478,74 @@ func NewNodeController( return nc, nil } +func (nc *NodeController) doEvictionPass() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() + for k := range nc.zonePodEvictor { + // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). + nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { + node, err := nc.nodeLister.Get(value.Value) + if apierrors.IsNotFound(err) { + glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + } else if err != nil { + glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) + } else { + zone := utilnode.GetZoneKey(node) + EvictionsNumber.WithLabelValues(zone).Inc() + } + nodeUid, _ := value.UID.(string) + remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) + return false, 0 + } + if remaining { + glog.Infof("Pods awaiting deletion due to NodeController eviction") + } + return true, 0 + }) + } +} + +func (nc *NodeController) doTaintingPass() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() + for k := range nc.zoneNotReadyOrUnreachableTainer { + // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). + nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) { + node, err := nc.nodeLister.Get(value.Value) + if apierrors.IsNotFound(err) { + glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + return true, 0 + } else if err != nil { + glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) + // retry in 50 millisecond + return false, 50 * time.Millisecond + } else { + zone := utilnode.GetZoneKey(node) + EvictionsNumber.WithLabelValues(zone).Inc() + } + _, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive. 
+ taintToAdd := v1.Taint{} + oppositeTaint := v1.Taint{} + if condition.Status == v1.ConditionFalse { + taintToAdd = *NotReadyTaintTemplate + oppositeTaint = *UnreachableTaintTemplate + } else if condition.Status == v1.ConditionUnknown { + taintToAdd = *UnreachableTaintTemplate + oppositeTaint = *NotReadyTaintTemplate + } else { + // It seems that the Node is ready again, so there's no need to taint it. + glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value) + return true, 0 + } + + return swapNodeControllerTaint(nc.kubeClient, &taintToAdd, &oppositeTaint, node), 0 + }) + } +} + // Run starts an asynchronous loop that monitors the status of cluster nodes. func (nc *NodeController) Run() { go func() { @@ -502,101 +570,12 @@ func (nc *NodeController) Run() { if nc.useTaintBasedEvictions { // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints. - go wait.Until(func() { - nc.evictorLock.Lock() - defer nc.evictorLock.Unlock() - for k := range nc.zoneNotReadyOrUnreachableTainer { - // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). - nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) { - node, err := nc.nodeLister.Get(value.Value) - if apierrors.IsNotFound(err) { - glog.Warningf("Node %v no longer present in nodeLister!", value.Value) - return true, 0 - } else if err != nil { - glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) - // retry in 50 millisecond - return false, 50 * time.Millisecond - } else { - zone := utilnode.GetZoneKey(node) - EvictionsNumber.WithLabelValues(zone).Inc() - } - _, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady) - // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive. - taintToAdd := v1.Taint{} - oppositeTaint := v1.Taint{} - if condition.Status == v1.ConditionFalse { - taintToAdd = *NotReadyTaintTemplate - oppositeTaint = *UnreachableTaintTemplate - } else if condition.Status == v1.ConditionUnknown { - taintToAdd = *UnreachableTaintTemplate - oppositeTaint = *NotReadyTaintTemplate - } else { - // It seems that the Node is ready again, so there's no need to taint it. - glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. 
Ignoring taint request.", value.Value) - return true, 0 - } - - taintToAdd.TimeAdded = metav1.Now() - err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, value.Value, &taintToAdd) - if err != nil { - utilruntime.HandleError( - fmt.Errorf( - "unable to taint %v unresponsive Node %q: %v", - taintToAdd.Key, - value.Value, - err)) - return false, 0 - } else { - glog.V(4).Info("Added %v Taint to Node %v", taintToAdd, value.Value) - } - err = controller.RemoveTaintOffNode(nc.kubeClient, value.Value, &oppositeTaint, node) - if err != nil { - utilruntime.HandleError( - fmt.Errorf( - "unable to remove %v unneeded taint from unresponsive Node %q: %v", - oppositeTaint.Key, - value.Value, - err)) - return false, 0 - } else { - glog.V(4).Info("Made sure that Node %v has no %v Taint", value.Value, oppositeTaint) - } - return true, 0 - }) - } - }, nodeEvictionPeriod, wait.NeverStop) + go wait.Until(nc.doTaintingPass, nodeEvictionPeriod, wait.NeverStop) } else { // Managing eviction of nodes: // When we delete pods off a node, if the node was not empty at the time we then // queue an eviction watcher. If we hit an error, retry deletion. - go wait.Until(func() { - nc.evictorLock.Lock() - defer nc.evictorLock.Unlock() - for k := range nc.zonePodEvictor { - // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). - nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { - node, err := nc.nodeLister.Get(value.Value) - if apierrors.IsNotFound(err) { - glog.Warningf("Node %v no longer present in nodeLister!", value.Value) - } else if err != nil { - glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) - } else { - zone := utilnode.GetZoneKey(node) - EvictionsNumber.WithLabelValues(zone).Inc() - } - nodeUid, _ := value.UID.(string) - remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) - return false, 0 - } - if remaining { - glog.Infof("Pods awaiting deletion due to NodeController eviction") - } - return true, 0 - }) - } - }, nodeEvictionPeriod, wait.NeverStop) + go wait.Until(nc.doEvictionPass, nodeEvictionPeriod, wait.NeverStop) } }() } @@ -685,7 +664,13 @@ func (nc *NodeController) monitorNodeStatus() error { // Check eviction timeout against decisionTimestamp if observedReadyCondition.Status == v1.ConditionFalse { if nc.useTaintBasedEvictions { - if nc.markNodeForTainting(node) { + // We want to update the taint straight away if Node is already tainted with the UnreachableTaint + if v1.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) { + taintToAdd := *NotReadyTaintTemplate + if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, UnreachableTaintTemplate, node) { + glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") + } + } else if nc.markNodeForTainting(node) { glog.V(2).Infof("Node %v is NotReady as of %v. 
Adding it to the Taint queue.", node.Name, decisionTimestamp, @@ -706,7 +691,13 @@ func (nc *NodeController) monitorNodeStatus() error { } if observedReadyCondition.Status == v1.ConditionUnknown { if nc.useTaintBasedEvictions { - if nc.markNodeForTainting(node) { + // We want to update the taint straight away if Node is already tainted with the UnreachableTaint + if v1.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) { + taintToAdd := *UnreachableTaintTemplate + if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, NotReadyTaintTemplate, node) { + glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") + } + } else if nc.markNodeForTainting(node) { glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.", node.Name, decisionTimestamp, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go index 52d0a2ca..f2596c84 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go @@ -74,7 +74,9 @@ func NewNodeControllerFromClient( clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, - allocateNodeCIDRs bool) (*nodeController, error) { + allocateNodeCIDRs bool, + useTaints bool, +) (*nodeController, error) { factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) @@ -99,8 +101,8 @@ func NewNodeControllerFromClient( serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs, - false, - false, + useTaints, + useTaints, ) if err != nil { return nil, err @@ -549,7 +551,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { for _, item := range table { nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() for _, ds := range item.daemonSets { @@ -698,7 +700,7 @@ func TestPodStatusChange(t *testing.T) { for _, item := range table { nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { @@ -1215,7 +1217,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { } nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 { return testRateLimiterQPS @@ -1310,7 
+1312,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) { nodeController, _ := NewNodeControllerFromClient(nil, fnh, 10*time.Minute, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, testNodeStartupGracePeriod, - testNodeMonitorPeriod, nil, nil, 0, false) + testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.cloud = &fakecloud.FakeCloud{} nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) } nodeController.recorder = testutil.NewFakeRecorder() @@ -1579,7 +1581,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) { for i, item := range table { nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { @@ -1813,7 +1815,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { for i, item := range table { nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { @@ -1845,6 +1847,146 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { } } +func TestSwapUnreachableNotReadyTaints(t *testing.T) { + fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC) + evictionTimeout := 10 * time.Minute + + fakeNodeHandler := &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + Labels: map[string]string{ + metav1.LabelZoneRegion: "region1", + metav1.LabelZoneFailureDomain: "zone1", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionUnknown, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + // Because of the logic that prevents NC from evicting anything when all Nodes are NotReady + // we need second healthy node in tests. Because of how the tests are written we need to update + // the status of this Node. 
+ { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + Labels: map[string]string{ + metav1.LabelZoneRegion: "region1", + metav1.LabelZoneFailureDomain: "zone1", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}), + } + timeToPass := evictionTimeout + newNodeStatus := v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + // Node status has just been updated, and is NotReady for 10min. + LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 9, 0, 0, time.UTC), + LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + } + healthyNodeNewStatus := v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 10, 0, 0, time.UTC), + LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + } + originalTaint := UnreachableTaintTemplate + updatedTaint := NotReadyTaintTemplate + + nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, + evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true) + nodeController.now = func() metav1.Time { return fakeNow } + nodeController.recorder = testutil.NewFakeRecorder() + if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + nodeController.doTaintingPass() + + node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{}) + if err != nil { + t.Errorf("Can't get current node0...") + return + } + node1, err := fakeNodeHandler.Get("node1", metav1.GetOptions{}) + if err != nil { + t.Errorf("Can't get current node1...") + return + } + + if originalTaint != nil && !v1.TaintExists(node0.Spec.Taints, originalTaint) { + t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints) + } + + nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(timeToPass)} } + + node0.Status = newNodeStatus + node1.Status = healthyNodeNewStatus + _, err = fakeNodeHandler.UpdateStatus(node0) + if err != nil { + t.Errorf(err.Error()) + return + } + _, err = fakeNodeHandler.UpdateStatus(node1) + if err != nil { + t.Errorf(err.Error()) + return + } + + if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + nodeController.doTaintingPass() + + node0, err = fakeNodeHandler.Get("node0", metav1.GetOptions{}) + if err != nil { + t.Errorf("Can't get current node0...") + return + } + if updatedTaint != nil { + if !v1.TaintExists(node0.Spec.Taints, updatedTaint) { + t.Errorf("Can't find taint %v in %v", updatedTaint, node0.Spec.Taints) + } + } +} + func TestNodeEventGeneration(t *testing.T) { fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC) fakeNodeHandler := 
&testutil.FakeNodeHandler{ @@ -1876,7 +2018,7 @@ func TestNodeEventGeneration(t *testing.T) { nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, testNodeStartupGracePeriod, - testNodeMonitorPeriod, nil, nil, 0, false) + testNodeMonitorPeriod, nil, nil, 0, false, false) nodeController.cloud = &fakecloud.FakeCloud{} nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) { return false, nil @@ -1987,7 +2129,7 @@ func TestCheckPod(t *testing.T) { }, } - nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false) + nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false, false) nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "new", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD index 84685c24..d1496620 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD @@ -17,6 +17,7 @@ go_library( "//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//pkg/util/node:go_default_library", + "//vendor:github.com/evanphx/json-patch", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/resource", @@ -24,6 +25,7 @@ go_library( "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/util/sets", + "//vendor:k8s.io/apimachinery/pkg/util/strategicpatch", "//vendor:k8s.io/apimachinery/pkg/watch", "//vendor:k8s.io/client-go/pkg/api/v1", "//vendor:k8s.io/client-go/util/clock", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go index 12093235..da2fdbc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go @@ -17,6 +17,7 @@ limitations under the License. 
package testutil import ( + "encoding/json" "errors" "fmt" "sync" @@ -28,16 +29,19 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" clientv1 "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/util/clock" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" utilnode "k8s.io/kubernetes/pkg/util/node" + "github.com/evanphx/json-patch" "github.com/golang/glog" ) @@ -189,6 +193,7 @@ func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) { m.RequestCount++ m.lock.Unlock() }() + nodeCopy := *node for i, updateNode := range m.UpdatedNodes { if updateNode.Name == nodeCopy.Name { @@ -207,6 +212,35 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) { m.RequestCount++ m.lock.Unlock() }() + + var origNodeCopy v1.Node + found := false + for i := range m.Existing { + if m.Existing[i].Name == node.Name { + origNodeCopy = *m.Existing[i] + found = true + } + } + updatedNodeIndex := -1 + for i := range m.UpdatedNodes { + if m.UpdatedNodes[i].Name == node.Name { + origNodeCopy = *m.UpdatedNodes[i] + updatedNodeIndex = i + found = true + } + } + + if !found { + return nil, fmt.Errorf("Not found node %v", node) + } + + origNodeCopy.Status = node.Status + if updatedNodeIndex < 0 { + m.UpdatedNodes = append(m.UpdatedNodes, &origNodeCopy) + } else { + m.UpdatedNodes[updatedNodeIndex] = &origNodeCopy + } + nodeCopy := *node m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy) return node, nil @@ -225,7 +259,76 @@ func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error // Patch patches a Node in the fake store. 
func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) { - return nil, nil + m.lock.Lock() + defer func() { + m.RequestCount++ + m.lock.Unlock() + }() + var nodeCopy v1.Node + for i := range m.Existing { + if m.Existing[i].Name == name { + nodeCopy = *m.Existing[i] + } + } + updatedNodeIndex := -1 + for i := range m.UpdatedNodes { + if m.UpdatedNodes[i].Name == name { + nodeCopy = *m.UpdatedNodes[i] + updatedNodeIndex = i + } + } + + originalObjJS, err := json.Marshal(nodeCopy) + if err != nil { + glog.Errorf("Failed to marshal %v", nodeCopy) + return nil, nil + } + var originalNode v1.Node + if err = json.Unmarshal(originalObjJS, &originalNode); err != nil { + glog.Errorf("Failed to unmarshall original object: %v", err) + return nil, nil + } + + var patchedObjJS []byte + switch pt { + case types.JSONPatchType: + patchObj, err := jsonpatch.DecodePatch(data) + if err != nil { + glog.Error(err.Error()) + return nil, nil + } + if patchedObjJS, err = patchObj.Apply(originalObjJS); err != nil { + glog.Error(err.Error()) + return nil, nil + } + case types.MergePatchType: + if patchedObjJS, err = jsonpatch.MergePatch(originalObjJS, data); err != nil { + glog.Error(err.Error()) + return nil, nil + } + case types.StrategicMergePatchType: + if patchedObjJS, err = strategicpatch.StrategicMergePatch(originalObjJS, data, originalNode); err != nil { + glog.Error(err.Error()) + return nil, nil + } + default: + glog.Errorf("unknown Content-Type header for patch: %v", pt) + return nil, nil + } + + var updatedNode v1.Node + if err = json.Unmarshal(patchedObjJS, &updatedNode); err != nil { + glog.Errorf("Failed to unmarshall patched object: %v", err) + return nil, nil + } + + if updatedNodeIndex < 0 { + m.UpdatedNodes = append(m.UpdatedNodes, &updatedNode) + } else { + m.UpdatedNodes[updatedNodeIndex] = &updatedNode + } + + return &updatedNode, nil } // FakeRecorder is used as a fake during testing. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD index fc3ffc62..da8aa335 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD @@ -16,6 +16,7 @@ go_library( ], tags = ["automanaged"], deps = [ + "//cmd/kubeadm/app/constants:go_default_library", "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go index 6a9066b2..a4462e32 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -617,10 +618,16 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate { return func(node *v1.Node) bool { // We add the master to the node list, but its unschedulable. So we use this to filter // the master. - // TODO: Use a node annotation to indicate the master if node.Spec.Unschedulable { return false } + + // As of 1.6, we will taint the master, but not necessarily mark it unschedulable. 
+ // Recognize nodes labeled as master, and filter them also, as we were doing previously. + if _, hasMasterRoleLabel := node.Labels[constants.LabelNodeRoleMaster]; hasMasterRoleLabel { + return false + } + // If we have no info, don't accept if len(node.Status.Conditions) == 0 { return false diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control.go index 6389d791..87cf3a9b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control.go @@ -132,6 +132,13 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p if !isCreated(replicas[i]) { return ssc.podControl.CreateStatefulPod(set, replicas[i]) } + // If we find a Pod that is currently terminating, we must wait until graceful deletion + // completes before we continue to make progress. + if isTerminating(replicas[i]) { + glog.V(2).Infof("StatefulSet %s is waiting for Pod %s to Terminate", + set.Name, replicas[i].Name) + return nil + } // If we have a Pod that has been created but is not running and ready we can not make progress. // We must ensure that all for each Pod, when we create it, all of its predecessors, with respect to its // ordinal, are Running and Ready. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control_test.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control_test.go index 2a74eeea..78e3ea79 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control_test.go @@ -403,6 +403,72 @@ func TestDefaultStatefulSetControlUpdatePodFailure(t *testing.T) { } } +func TestDefaultStatefulSetControlBlocksOnTerminating(t *testing.T) { + set := newStatefulSet(3) + client := fake.NewSimpleClientset(set) + + informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) + spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets()) + ssc := NewDefaultStatefulSetControl(spc) + spc.SetUpdateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0) + + stop := make(chan struct{}) + defer close(stop) + informerFactory.Start(stop) + cache.WaitForCacheSync( + stop, + informerFactory.Apps().V1beta1().StatefulSets().Informer().HasSynced, + informerFactory.Core().V1().Pods().Informer().HasSynced, + ) + + if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + var err error + set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if set.Status.Replicas != 3 { + t.Fatal("Failed to scale StatefulSet to 3 replicas") + } + // scale the set and add a terminated pod + *set.Spec.Replicas = 4 + pods, err := spc.addTerminatingPod(set, 2) + if err != nil { + t.Fatal(err) + } + if err := ssc.UpdateStatefulSet(set, pods); err != nil { + t.Fatal(err) + } + pods, err = spc.podsLister.List(labels.Everything()) + if err != nil { + t.Fatalf("Error listing pods: %v", err) + } + if len(pods) != 3 { + t.Fatalf("Expected 3 pods, got %d", len(pods)) + } + sort.Sort(ascendingOrdinal(pods)) + spc.DeleteStatefulPod(set, pods[2]) + pods, err = spc.podsLister.List(labels.Everything()) + if err != nil { + 
t.Fatalf("Error listing pods: %v", err) + } + if len(pods) != 2 { + t.Fatalf("Expected 3 pods, got %d", len(pods)) + } + if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if set.Status.Replicas != 4 { + t.Fatal("Failed to scale StatefulSet to 3 replicas") + } +} + func TestDefaultStatefulSetControlUpdateSetStatusFailure(t *testing.T) { set := newStatefulSet(3) client := fake.NewSimpleClientset(set) @@ -690,7 +756,7 @@ func (spc *fakeStatefulPodControl) setPodInitStatus(set *apps.StatefulSet, ordin return spc.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) addTerminatedPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (spc *fakeStatefulPodControl) addTerminatingPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) { pod := newStatefulSetPod(set, ordinal) pod.Status.Phase = v1.PodRunning deleted := metav1.NewTime(time.Now()) @@ -906,7 +972,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn if err != nil { return err } - if pods, err = spc.addTerminatedPod(set, ordinal); err != nil { + if pods, err = spc.addTerminatingPod(set, ordinal); err != nil { return err } if err = ssc.UpdateStatefulSet(set, pods); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_test.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_test.go index 71b24b91..1f73b051 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_test.go @@ -91,11 +91,11 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) { if set.Status.Replicas != 3 { t.Error("Falied to scale statefulset to 3 replicas") } - pods, err := spc.addTerminatedPod(set, 3) + pods, err := spc.addTerminatingPod(set, 3) if err != nil { t.Error(err) } - pods, err = spc.addTerminatedPod(set, 4) + pods, err = spc.addTerminatingPod(set, 4) if err != nil { t.Error(err) } @@ -669,7 +669,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr spc.setsIndexer.Add(set) ssc.enqueueStatefulSet(set) fakeWorker(ssc) - pods, err = spc.addTerminatedPod(set, ord) + pods, err = spc.addTerminatingPod(set, ord) pod = getPodAtOrdinal(pods, ord) ssc.updatePod(&prev, pod) fakeWorker(ssc) @@ -679,7 +679,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr for set.Status.Replicas > *set.Spec.Replicas { pods, err = spc.podsLister.Pods(set.Namespace).List(selector) ord := len(pods) - pods, err = spc.addTerminatedPod(set, ord) + pods, err = spc.addTerminatingPod(set, ord) pod = getPodAtOrdinal(pods, ord) ssc.updatePod(&prev, pod) fakeWorker(ssc) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go index 672fd5b1..ab8e3a4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go @@ -221,14 +221,14 @@ func isFailed(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed } -// isTerminated returns true if pod's deletion Timestamp has been set -func isTerminated(pod *v1.Pod) bool { +// isTerminating returns true if pod's DeletionTimestamp has been 
set +func isTerminating(pod *v1.Pod) bool { return pod.DeletionTimestamp != nil } // isHealthy returns true if pod is running and ready and has not been terminated func isHealthy(pod *v1.Pod) bool { - return isRunningAndReady(pod) && !isTerminated(pod) + return isRunningAndReady(pod) && !isTerminating(pod) } // newControllerRef returns an ControllerRef pointing to a given StatefulSet. diff --git a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go index 4b38767d..3de3ec3b 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go +++ b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go @@ -12227,7 +12227,7 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope }, "concurrencyPolicy": { SchemaProps: spec.SchemaProps{ - Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.", Type: []string{"string"}, Format: "", }, diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authorization.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authorization.go index b5c8aca1..510614c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authorization.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authorization.go @@ -75,14 +75,17 @@ func (s *BuiltInAuthorizationOptions) AddFlags(fs *pflag.FlagSet) { } -func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig { +func (s *BuiltInAuthorizationOptions) Modes() []string { modes := []string{} if len(s.Mode) > 0 { modes = strings.Split(s.Mode, ",") } + return modes +} +func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig { return authorizer.AuthorizationConfig{ - AuthorizationModes: modes, + AuthorizationModes: s.Modes(), PolicyFile: s.PolicyFile, WebhookConfigFile: s.WebhookConfigFile, WebhookCacheAuthorizedTTL: s.WebhookCacheAuthorizedTTL, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go index fd88e627..7cbd8cca 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -85,7 +85,7 @@ func (m *containerManager) doWork() { glog.Errorf("Unable to get docker version: %v", err) return } - version, err := utilversion.ParseSemantic(v.Version) + version, err := utilversion.ParseGeneric(v.Version) if err != nil { glog.Errorf("Unable to parse docker version %q: %v", v.Version, err) return diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go index f48d8b19..13f3bbf8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go @@ -388,6 +388,9 @@ func (ds *dockerService) getDockerAPIVersion() (*semver.Version, error) { } else { dv, err = ds.getDockerVersion() } + if err != nil { + return nil, err + } apiVersion, err := semver.Parse(dv.APIVersion) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go 
b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go index 0b9f3392..eb026cde 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go @@ -163,7 +163,6 @@ func modifyHostNetworkOptionForContainer(hostNetwork bool, sandboxID string, hc hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode) hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode) hc.UTSMode = "" - hc.PidMode = "" if hostNetwork { hc.UTSMode = namespaceModeHost diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go index a0f54e95..9b89b467 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go @@ -306,6 +306,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) { expected: &dockercontainer.HostConfig{ NetworkMode: dockercontainer.NetworkMode(sandboxNSMode), IpcMode: dockercontainer.IpcMode(sandboxNSMode), + PidMode: namespaceModeHost, }, }, } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index a03601ee..a48f29df 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -371,23 +371,22 @@ func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceNam for _, nodeReclaimFunc := range nodeReclaimFuncs { // attempt to reclaim the pressured resource. reclaimed, err := nodeReclaimFunc() - if err == nil { - // update our local observations based on the amount reported to have been reclaimed. - // note: this is optimistic, other things could have been still consuming the pressured resource in the interim. - signal := resourceToSignal[resourceToReclaim] - value, ok := observations[signal] - if !ok { - glog.Errorf("eviction manager: unable to find value associated with signal %v", signal) - continue - } - value.available.Add(*reclaimed) + if err != nil { + glog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) + } + // update our local observations based on the amount reported to have been reclaimed. + // note: this is optimistic, other things could have been still consuming the pressured resource in the interim. 
+ signal := resourceToSignal[resourceToReclaim] + value, ok := observations[signal] + if !ok { + glog.Errorf("eviction manager: unable to find value associated with signal %v", signal) + continue + } + value.available.Add(*reclaimed) - // evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals - if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 { - return true - } - } else { - glog.Errorf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) + // evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals + if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 { + return true } } return false diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go index 8ca971de..e158f080 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -995,13 +995,10 @@ func deleteImages(imageGC ImageGC, reportBytesFreed bool) nodeReclaimFunc { return func() (*resource.Quantity, error) { glog.Infof("eviction manager: attempting to delete unused images") bytesFreed, err := imageGC.DeleteUnusedImages() - if err != nil { - return nil, err - } reclaimed := int64(0) if reportBytesFreed { reclaimed = bytesFreed } - return resource.NewQuantity(reclaimed, resource.BinarySI), nil + return resource.NewQuantity(reclaimed, resource.BinarySI), err } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go index 977eb06a..874480c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go @@ -76,7 +76,8 @@ type NodeProvider interface { // ImageGC is responsible for performing garbage collection of unused images. type ImageGC interface { - // DeleteUnusedImages deletes unused images and returns the number of bytes freed, or an error. + // DeleteUnusedImages deletes unused images and returns the number of bytes freed, and an error. + // This returns the bytes freed even if an error is returned. DeleteUnusedImages() (int64, error) } @@ -118,6 +119,8 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time // nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods. +// Returns the quantity of resources reclaimed and an error, if applicable. +// nodeReclaimFunc return the resources reclaimed even if an error occurs. type nodeReclaimFunc func() (*resource.Quantity, error) // nodeReclaimFuncs is an ordered list of nodeReclaimFunc diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 9584ef5f..9942ec3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -2053,7 +2053,7 @@ func (kl *Kubelet) updateRuntimeUp() { } // Only check specific conditions when runtime integration type is cri, // because the old integration doesn't populate any runtime condition. 
- if kl.kubeletConfiguration.EnableCRI { + if kl.kubeletConfiguration.EnableCRI && kl.kubeletConfiguration.ContainerRuntime != "rkt" { if s == nil { glog.Errorf("Container runtime status is nil") return diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index e1eda165..937fba7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -55,6 +55,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/status" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/term" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/volumehelper" @@ -135,7 +136,32 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h return nil, err } if mount.SubPath != "" { + fileinfo, err := os.Lstat(hostPath) + if err != nil { + return nil, err + } + perm := fileinfo.Mode() + hostPath = filepath.Join(hostPath, mount.SubPath) + + if subPathExists, err := util.FileExists(hostPath); err != nil { + glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) + } else if !subPathExists { + // Create the sub path now because if it's auto-created later when referenced, it may have an + // incorrect ownership and mode. For example, the sub path directory must have at least g+rwx + // when the pod specifies an fsGroup, and if the directory is not created here, Docker will + // later auto-create it with the incorrect mode 0750 + if err := os.MkdirAll(hostPath, perm); err != nil { + glog.Errorf("failed to mkdir:%s", hostPath) + return nil, err + } + + // chmod the sub path because umask may have prevented us from making the sub path with the same + // permissions as the mounter path + if err := os.Chmod(hostPath, perm); err != nil { + return nil, err + } + } } // Docker Volume Mounts fail on Windows if it is not of the form C:/ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 5c519354..5fab0ffd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -357,10 +357,10 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag // readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented // by path. It reads up to max log lines. 
-func readLastStringFromContainerLogs(path string) string { +func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string { value := int64(kubecontainer.MaxContainerTerminationMessageLogLines) buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength) - if err := ReadLogs(path, &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil { + if err := m.ReadLogs(path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil { return fmt.Sprintf("Error on reading termination message from logs: %v", err) } return buf.String() @@ -414,7 +414,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs) if checkLogs { path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount) - tMessage = readLastStringFromContainerLogs(path) + tMessage = m.readLastStringFromContainerLogs(path) } // Use the termination message written by the application is not empty if len(tMessage) != 0 { @@ -688,7 +688,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku labeledInfo := getContainerInfoFromLabels(status.Labels) annotatedInfo := getContainerInfoFromAnnotations(status.Annotations) path := buildFullContainerLogsPath(pod.UID, labeledInfo.ContainerName, annotatedInfo.RestartCount) - return ReadLogs(path, logOptions, stdout, stderr) + return m.ReadLogs(path, containerID.ID, logOptions, stdout, stderr) } // GetExec gets the endpoint the runtime will serve the exec request from. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go index 25a507f3..38675928 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go @@ -32,6 +32,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/util/tail" ) @@ -54,6 +55,11 @@ const ( timeFormat = time.RFC3339Nano // blockSize is the block size used in tail. blockSize = 1024 + + // stateCheckPeriod is the period to check container state while following + // the container log. Kubelet should not keep following the log when the + // container is not running. + stateCheckPeriod = 5 * time.Second ) var ( @@ -110,7 +116,9 @@ func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions { } // ReadLogs read the container log and redirect into stdout and stderr. -func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error { +// Note that containerID is only needed when following the log, or else +// just pass in empty string "". +func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error { f, err := os.Open(path) if err != nil { return fmt.Errorf("failed to open log file %q: %v", path, err) @@ -166,8 +174,8 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) } } // Wait until the next log change. 
- if err := waitLogs(watcher); err != nil { - return fmt.Errorf("failed to wait logs for log file %q: %v", path, err) + if found, err := m.waitLogs(containerID, watcher); !found { + return err } continue } @@ -196,6 +204,41 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) } } +// waitLogs wait for the next log write. It returns a boolean and an error. The boolean +// indicates whether a new log is found; the error is error happens during waiting new logs. +func (m *kubeGenericRuntimeManager) waitLogs(id string, w *fsnotify.Watcher) (bool, error) { + errRetry := 5 + for { + select { + case e := <-w.Events: + switch e.Op { + case fsnotify.Write: + return true, nil + default: + glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) + } + case err := <-w.Errors: + glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) + if errRetry == 0 { + return false, err + } + errRetry-- + case <-time.After(stateCheckPeriod): + s, err := m.runtimeService.ContainerStatus(id) + if err != nil { + return false, err + } + // Only keep following container log when it is running. + if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { + glog.Errorf("Container %q is not running (state=%q)", id, s.State) + // Do not return error because it's normal that the container stops + // during waiting. + return false, nil + } + } + } +} + // parseFunc is a function parsing one log line to the internal log type. // Notice that the caller must make sure logMessage is not nil. type parseFunc func([]byte, *logMessage) error @@ -267,28 +310,6 @@ func getParseFunc(log []byte) (parseFunc, error) { return nil, fmt.Errorf("unsupported log format: %q", log) } -// waitLogs wait for the next log write. -func waitLogs(w *fsnotify.Watcher) error { - errRetry := 5 - for { - select { - case e := <-w.Events: - switch e.Op { - case fsnotify.Write: - return nil - default: - glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) - } - case err := <-w.Errors: - glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) - if errRetry == 0 { - return err - } - errRetry-- - } - } -} - // logWriter controls the writing into the stream based on the log options. type logWriter struct { stdout io.Writer diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go index 4ccbe1a4..0cc784fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go @@ -41,24 +41,24 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po } // set namespace options and supplemental groups. - podSc := pod.Spec.SecurityContext - if podSc == nil { - return synthesized - } synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{ HostNetwork: pod.Spec.HostNetwork, HostIpc: pod.Spec.HostIPC, HostPid: pod.Spec.HostPID, } - if podSc.FSGroup != nil { - synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup) + podSc := pod.Spec.SecurityContext + if podSc != nil { + if podSc.FSGroup != nil { + synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup) + } + + if podSc.SupplementalGroups != nil { + synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...) 
+ } } if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 { synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, groups...) } - if podSc.SupplementalGroups != nil { - synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...) - } return synthesized } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go index 8a4e29f7..0b0ef219 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go @@ -209,7 +209,11 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error { // StopContainer stops a running container with a grace period (i.e., timeout). func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) error { - ctx, cancel := getContextWithTimeout(r.timeout) + ctx, cancel := getContextWithTimeout(time.Duration(timeout) * time.Second) + if timeout == 0 { + // Use default timeout if stop timeout is 0. + ctx, cancel = getContextWithTimeout(r.timeout) + } defer cancel() _, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{ diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD index 5eea7679..c7503ac8 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD @@ -11,20 +11,17 @@ load( go_library( name = "go_default_library", srcs = [ - "api.go", "doc.go", "healthcheck.go", - "http.go", - "listener.go", - "worker.go", ], tags = ["automanaged"], deps = [ "//vendor:github.com/golang/glog", + "//vendor:github.com/renstrom/dedent", "//vendor:k8s.io/apimachinery/pkg/types", - "//vendor:k8s.io/apimachinery/pkg/util/sets", - "//vendor:k8s.io/apimachinery/pkg/util/wait", - "//vendor:k8s.io/client-go/tools/cache", + "//vendor:k8s.io/client-go/pkg/api", + "//vendor:k8s.io/client-go/pkg/api/v1", + "//vendor:k8s.io/client-go/tools/record", ], ) @@ -34,6 +31,7 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ + "//vendor:github.com/davecgh/go-spew/spew", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/util/sets", ], diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/api.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/api.go deleted file mode 100644 index 91aa3bd7..00000000 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/api.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package healthcheck - -import ( - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" -) - -// All public API Methods for this package - -// UpdateEndpoints Update the set of local endpoints for a service -func UpdateEndpoints(serviceName types.NamespacedName, endpointUids sets.String) { - req := &proxyMutationRequest{ - serviceName: serviceName, - endpointUids: &endpointUids, - } - healthchecker.mutationRequestChannel <- req -} - -func updateServiceListener(serviceName types.NamespacedName, listenPort int, add bool) bool { - responseChannel := make(chan bool) - req := &proxyListenerRequest{ - serviceName: serviceName, - listenPort: uint16(listenPort), - add: add, - responseChannel: responseChannel, - } - healthchecker.listenerRequestChannel <- req - return <-responseChannel -} - -// AddServiceListener Request addition of a listener for a service's health check -func AddServiceListener(serviceName types.NamespacedName, listenPort int) bool { - return updateServiceListener(serviceName, listenPort, true) -} - -// DeleteServiceListener Request deletion of a listener for a service's health check -func DeleteServiceListener(serviceName types.NamespacedName, listenPort int) bool { - return updateServiceListener(serviceName, listenPort, false) -} - -// Run Start the healthchecker main loop -func Run() { - healthchecker = proxyHealthCheckFactory() - // Wrap with a wait.Forever to handle panics. - go wait.Forever(func() { - healthchecker.handlerLoop() - }, 0) -} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go index 56ecc11e..0a9ea094 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies +// Package healthcheck provides tools for serving kube-proxy healthchecks. package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck" diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go index e9dfe86e..999bc1b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go @@ -20,108 +20,216 @@ import ( "fmt" "net" "net/http" + "strings" + "sync" "github.com/golang/glog" + "github.com/renstrom/dedent" + "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/cache" + "k8s.io/client-go/pkg/api" + clientv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/record" ) -// proxyMutationRequest: Message to request addition/deletion of endpoints for a service -type proxyMutationRequest struct { - serviceName types.NamespacedName - endpointUids *sets.String +// Server serves HTTP endpoints for each service name, with results +// based on the endpoints. If there are 0 endpoints for a service, it returns a +// 503 "Service Unavailable" error (telling LBs not to use this node). If there +// are 1 or more endpoints, it returns a 200 "OK". +type Server interface { + // Make the new set of services be active. Services that were open before + // will be closed. Services that are new will be opened. Service that + // existed and are in the new set will be left alone. 
The value of the map + // is the healthcheck-port to listen on. + SyncServices(newServices map[types.NamespacedName]uint16) error + // Make the new set of endpoints be active. Endpoints for services that do + // not exist will be dropped. The value of the map is the number of + // endpoints the service has on this node. + SyncEndpoints(newEndpoints map[types.NamespacedName]int) error } -// proxyListenerRequest: Message to request addition/deletion of a service responder on a listening port -type proxyListenerRequest struct { - serviceName types.NamespacedName - listenPort uint16 - add bool - responseChannel chan bool +// Listener allows for testing of Server. If the Listener argument +// to NewServer() is nil, the real net.Listen function will be used. +type Listener interface { + // Listen is very much like net.Listen, except the first arg (network) is + // fixed to be "tcp". + Listen(addr string) (net.Listener, error) } -// serviceEndpointsList: A list of endpoints for a service -type serviceEndpointsList struct { - serviceName types.NamespacedName - endpoints *sets.String +// HTTPServerFactory allows for testing of Server. If the +// HTTPServerFactory argument to NewServer() is nil, the real +// http.Server type will be used. +type HTTPServerFactory interface { + // New creates an instance of a type satisfying HTTPServer. This is + // designed to include http.Server. + New(addr string, handler http.Handler) HTTPServer } -// serviceResponder: Contains net/http datastructures necessary for responding to each Service's health check on its aux nodePort -type serviceResponder struct { - serviceName types.NamespacedName - listenPort uint16 - listener *net.Listener - server *http.Server +// HTTPServer allows for testing of Server. +type HTTPServer interface { + // Server is designed so that http.Server satifies this interface, + Serve(listener net.Listener) error } -// proxyHC: Handler structure for health check, endpoint add/delete and service listener add/delete requests -type proxyHC struct { - serviceEndpointsMap cache.ThreadSafeStore - serviceResponderMap map[types.NamespacedName]serviceResponder - mutationRequestChannel chan *proxyMutationRequest - listenerRequestChannel chan *proxyListenerRequest -} - -// handleHealthCheckRequest - received a health check request - lookup and respond to HC. -func (h *proxyHC) handleHealthCheckRequest(rw http.ResponseWriter, serviceName string) { - s, ok := h.serviceEndpointsMap.Get(serviceName) - if !ok { - glog.V(4).Infof("Service %s not found or has no local endpoints", serviceName) - sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "No Service Endpoints Found") - return +// NewServer allocates a new healthcheck server manager. If either +// of the injected arguments are nil, defaults will be used. 
+func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server { + if listener == nil { + listener = stdNetListener{} } - numEndpoints := len(*s.(*serviceEndpointsList).endpoints) - if numEndpoints > 0 { - sendHealthCheckResponse(rw, http.StatusOK, fmt.Sprintf("%d Service Endpoints found", numEndpoints)) - return + if httpServerFactory == nil { + httpServerFactory = stdHTTPServerFactory{} + } + return &server{ + hostname: hostname, + recorder: recorder, + listener: listener, + httpFactory: httpServerFactory, + services: map[types.NamespacedName]*hcInstance{}, } - sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "0 local Endpoints are alive") } -// handleMutationRequest - receive requests to mutate the table entry for a service -func (h *proxyHC) handleMutationRequest(req *proxyMutationRequest) { - numEndpoints := len(*req.endpointUids) - glog.V(4).Infof("LB service health check mutation request Service: %s - %d Endpoints %v", - req.serviceName, numEndpoints, (*req.endpointUids).List()) - if numEndpoints == 0 { - if _, ok := h.serviceEndpointsMap.Get(req.serviceName.String()); ok { - glog.V(4).Infof("Deleting endpoints map for service %s, all local endpoints gone", req.serviceName.String()) - h.serviceEndpointsMap.Delete(req.serviceName.String()) - } - return +// Implement Listener in terms of net.Listen. +type stdNetListener struct{} + +func (stdNetListener) Listen(addr string) (net.Listener, error) { + return net.Listen("tcp", addr) +} + +var _ Listener = stdNetListener{} + +// Implement HTTPServerFactory in terms of http.Server. +type stdHTTPServerFactory struct{} + +func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer { + return &http.Server{ + Addr: addr, + Handler: handler, } - var entry *serviceEndpointsList - e, exists := h.serviceEndpointsMap.Get(req.serviceName.String()) - if exists { - entry = e.(*serviceEndpointsList) - if entry.endpoints.Equal(*req.endpointUids) { - return - } - // Compute differences just for printing logs about additions and removals - deletedEndpoints := entry.endpoints.Difference(*req.endpointUids) - newEndpoints := req.endpointUids.Difference(*entry.endpoints) - for _, e := range newEndpoints.List() { - glog.V(4).Infof("Adding local endpoint %s to LB health check for service %s", - e, req.serviceName.String()) - } - for _, d := range deletedEndpoints.List() { - glog.V(4).Infof("Deleted endpoint %s from service %s LB health check (%d endpoints left)", - d, req.serviceName.String(), len(*entry.endpoints)) +} + +var _ HTTPServerFactory = stdHTTPServerFactory{} + +type server struct { + hostname string + recorder record.EventRecorder // can be nil + listener Listener + httpFactory HTTPServerFactory + + lock sync.Mutex + services map[types.NamespacedName]*hcInstance +} + +func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) error { + hcs.lock.Lock() + defer hcs.lock.Unlock() + + // Remove any that are not needed any more. + for nsn, svc := range hcs.services { + if port, found := newServices[nsn]; !found || port != svc.port { + glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port) + if err := svc.listener.Close(); err != nil { + glog.Errorf("Close(%v): %v", svc.listener.Addr(), err) + } + delete(hcs.services, nsn) } } - entry = &serviceEndpointsList{serviceName: req.serviceName, endpoints: req.endpointUids} - h.serviceEndpointsMap.Add(req.serviceName.String(), entry) + + // Add any that are needed. 
+ for nsn, port := range newServices { + if hcs.services[nsn] != nil { + glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port) + continue + } + + glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port) + svc := &hcInstance{port: port} + addr := fmt.Sprintf(":%d", port) + svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs}) + var err error + svc.listener, err = hcs.listener.Listen(addr) + if err != nil { + msg := fmt.Sprintf("node %s failed to start healthcheck %q on port %d: %v", hcs.hostname, nsn.String(), port, err) + + if hcs.recorder != nil { + hcs.recorder.Eventf( + &clientv1.ObjectReference{ + Kind: "Service", + Namespace: nsn.Namespace, + Name: nsn.Name, + UID: types.UID(nsn.String()), + }, api.EventTypeWarning, "FailedToStartHealthcheck", msg) + } + glog.Error(msg) + continue + } + hcs.services[nsn] = svc + + go func(nsn types.NamespacedName, svc *hcInstance) { + // Serve() will exit when the listener is closed. + glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port) + if err := svc.server.Serve(svc.listener); err != nil { + glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err) + return + } + glog.V(3).Infof("Healthcheck %q closed", nsn.String()) + }(nsn, svc) + } + return nil } -// proxyHealthCheckRequest - Factory method to instantiate the health check handler -func proxyHealthCheckFactory() *proxyHC { - glog.V(2).Infof("Initializing kube-proxy health checker") - phc := &proxyHC{ - serviceEndpointsMap: cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}), - serviceResponderMap: make(map[types.NamespacedName]serviceResponder), - mutationRequestChannel: make(chan *proxyMutationRequest, 1024), - listenerRequestChannel: make(chan *proxyListenerRequest, 1024), - } - return phc +type hcInstance struct { + port uint16 + listener net.Listener + server HTTPServer + endpoints int // number of local endpoints for a service +} + +type hcHandler struct { + name types.NamespacedName + hcs *server +} + +var _ http.Handler = hcHandler{} + +func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + h.hcs.lock.Lock() + count := h.hcs.services[h.name].endpoints + h.hcs.lock.Unlock() + + resp.Header().Set("Content-Type", "application/json") + if count == 0 { + resp.WriteHeader(http.StatusServiceUnavailable) + } else { + resp.WriteHeader(http.StatusOK) + } + fmt.Fprintf(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(` + { + "service": { + "namespace": %q, + "name": %q + }, + "localEndpoints": %d + } + `, h.name.Namespace, h.name.Name, count)), "\n")) +} + +func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error { + hcs.lock.Lock() + defer hcs.lock.Unlock() + + for nsn, count := range newEndpoints { + if hcs.services[nsn] == nil { + glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String()) + continue + } + glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String()) + hcs.services[nsn].endpoints = count + } + for nsn, hci := range hcs.services { + if _, found := newEndpoints[nsn]; !found { + hci.endpoints = 0 + } + } + return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go index 72615589..a90a8a38 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go @@ -17,142 +17,341 @@ limitations under the License. 
package healthcheck import ( - "fmt" - "io/ioutil" - "math/rand" + "encoding/json" + "net" "net/http" + "net/http/httptest" "testing" - "time" + + "github.com/davecgh/go-spew/spew" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" ) -type TestCaseData struct { - nodePorts int - numEndpoints int - nodePortList []int - svcNames []types.NamespacedName +type fakeListener struct { + openPorts sets.String } -const ( - startPort = 20000 - endPort = 40000 -) - -var ( - choices = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") -) - -func generateRandomString(n int) string { - - b := make([]byte, n) - l := len(choices) - for i := range b { - b[i] = choices[rand.Intn(l)] - } - return string(b) -} - -func chooseServiceName(tc int, hint int) types.NamespacedName { - var svc types.NamespacedName - svc.Namespace = fmt.Sprintf("ns_%d", tc) - svc.Name = fmt.Sprintf("name_%d", hint) - return svc -} - -func generateEndpointSet(max int) sets.String { - s := sets.NewString() - for i := 0; i < max; i++ { - s.Insert(fmt.Sprintf("%d%s", i, generateRandomString(8))) - } - return s -} - -func verifyHealthChecks(tc *TestCaseData, t *testing.T) bool { - var success = true - time.Sleep(100 * time.Millisecond) - for i := 0; i < tc.nodePorts; i++ { - t.Logf("Validating HealthCheck works for svc %s nodePort %d\n", tc.svcNames[i], tc.nodePortList[i]) - res, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", tc.nodePortList[i])) - if err != nil { - t.Logf("ERROR: Failed to connect to listening port") - success = false - continue - } - robots, err := ioutil.ReadAll(res.Body) - if res.StatusCode == http.StatusServiceUnavailable { - t.Logf("ERROR: HealthCheck returned %s: %s", res.Status, string(robots)) - success = false - continue - } - res.Body.Close() - if err != nil { - t.Logf("Error: reading body of response (%s)", err) - success = false - continue - } - } - if success { - t.Logf("Success: All nodePorts found active") - } - return success -} - -func TestHealthChecker(t *testing.T) { - testcases := []TestCaseData{ - { - nodePorts: 1, - numEndpoints: 2, - }, - { - nodePorts: 10, - numEndpoints: 6, - }, - { - nodePorts: 100, - numEndpoints: 1, - }, - } - - Run() - - ports := startPort - for n, tc := range testcases { - tc.nodePortList = make([]int, tc.nodePorts) - tc.svcNames = make([]types.NamespacedName, tc.nodePorts) - for i := 0; i < tc.nodePorts; i++ { - tc.svcNames[i] = chooseServiceName(n, i) - t.Logf("Updating endpoints map for %s %d", tc.svcNames[i], tc.numEndpoints) - for { - UpdateEndpoints(tc.svcNames[i], generateEndpointSet(tc.numEndpoints)) - tc.nodePortList[i] = ports - ports++ - if AddServiceListener(tc.svcNames[i], tc.nodePortList[i]) { - break - } - DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i]) - // Keep searching for a port that works - t.Logf("Failed to bind/listen on port %d...trying next port", ports-1) - if ports > endPort { - t.Errorf("Exhausted range of ports available for tests") - return - } - } - } - t.Logf("Validating if all nodePorts for tc %d work", n) - if !verifyHealthChecks(&tc, t) { - t.Errorf("Healthcheck validation failed") - } - - for i := 0; i < tc.nodePorts; i++ { - DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i]) - UpdateEndpoints(tc.svcNames[i], sets.NewString()) - } - - // Ensure that all listeners have been shutdown - if verifyHealthChecks(&tc, t) { - t.Errorf("Healthcheck validation failed") - } +func newFakeListener() *fakeListener { + return &fakeListener{ + openPorts: sets.String{}, + } +} + +func (fake *fakeListener) 
hasPort(addr string) bool { + return fake.openPorts.Has(addr) +} + +func (fake *fakeListener) Listen(addr string) (net.Listener, error) { + fake.openPorts.Insert(addr) + return &fakeNetListener{ + parent: fake, + addr: addr, + }, nil +} + +type fakeNetListener struct { + parent *fakeListener + addr string +} + +func (fake *fakeNetListener) Accept() (net.Conn, error) { + // Not implemented + return nil, nil +} + +func (fake *fakeNetListener) Close() error { + fake.parent.openPorts.Delete(fake.addr) + return nil +} + +func (fake *fakeNetListener) Addr() net.Addr { + // Not implemented + return nil +} + +type fakeHTTPServerFactory struct{} + +func newFakeHTTPServerFactory() *fakeHTTPServerFactory { + return &fakeHTTPServerFactory{} +} + +func (fake *fakeHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer { + return &fakeHTTPServer{ + addr: addr, + handler: handler, + } +} + +type fakeHTTPServer struct { + addr string + handler http.Handler +} + +func (fake *fakeHTTPServer) Serve(listener net.Listener) error { + return nil // Cause the goroutine to return +} + +func mknsn(ns, name string) types.NamespacedName { + return types.NamespacedName{ + Namespace: ns, + Name: name, + } +} + +type hcPayload struct { + Service struct { + Namespace string + Name string + } + LocalEndpoints int +} + +func TestServer(t *testing.T) { + listener := newFakeListener() + httpFactory := newFakeHTTPServerFactory() + + hcsi := NewServer("hostname", nil, listener, httpFactory) + hcs := hcsi.(*server) + if len(hcs.services) != 0 { + t.Errorf("expected 0 services, got %d", len(hcs.services)) + } + + // sync nothing + hcs.SyncServices(nil) + if len(hcs.services) != 0 { + t.Errorf("expected 0 services, got %d", len(hcs.services)) + } + hcs.SyncEndpoints(nil) + if len(hcs.services) != 0 { + t.Errorf("expected 0 services, got %d", len(hcs.services)) + } + + // sync unknown endpoints, should be dropped + hcs.SyncEndpoints(map[types.NamespacedName]int{mknsn("a", "b"): 93}) + if len(hcs.services) != 0 { + t.Errorf("expected 0 services, got %d", len(hcs.services)) + } + + // sync a real service + nsn := mknsn("a", "b") + hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 9376}) + if len(hcs.services) != 1 { + t.Errorf("expected 1 service, got %d", len(hcs.services)) + } + if hcs.services[nsn].endpoints != 0 { + t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints) + } + if len(listener.openPorts) != 1 { + t.Errorf("expected 1 open port, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts)) + } + if !listener.hasPort(":9376") { + t.Errorf("expected port :9376 to be open\n%s", spew.Sdump(listener.openPorts)) + } + // test the handler + testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t) + + // sync an endpoint + hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18}) + if len(hcs.services) != 1 { + t.Errorf("expected 1 service, got %d", len(hcs.services)) + } + if hcs.services[nsn].endpoints != 18 { + t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints) + } + // test the handler + testHandler(hcs, nsn, http.StatusOK, 18, t) + + // sync zero endpoints + hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 0}) + if len(hcs.services) != 1 { + t.Errorf("expected 1 service, got %d", len(hcs.services)) + } + if hcs.services[nsn].endpoints != 0 { + t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints) + } + // test the handler + testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t) + + // put the endpoint back + 
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 11})
+	if len(hcs.services) != 1 {
+		t.Errorf("expected 1 service, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn].endpoints != 11 {
+		t.Errorf("expected 11 endpoints, got %d", hcs.services[nsn].endpoints)
+	}
+	// sync nil endpoints
+	hcs.SyncEndpoints(nil)
+	if len(hcs.services) != 1 {
+		t.Errorf("expected 1 service, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
+	}
+	// test the handler
+	testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
+
+	// put the endpoint back
+	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
+	if len(hcs.services) != 1 {
+		t.Errorf("expected 1 service, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn].endpoints != 18 {
+		t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
+	}
+	// delete the service
+	hcs.SyncServices(nil)
+	if len(hcs.services) != 0 {
+		t.Errorf("expected 0 services, got %d", len(hcs.services))
+	}
+
+	// sync multiple services
+	nsn1 := mknsn("a", "b")
+	nsn2 := mknsn("c", "d")
+	nsn3 := mknsn("e", "f")
+	nsn4 := mknsn("g", "h")
+	hcs.SyncServices(map[types.NamespacedName]uint16{
+		nsn1: 9376,
+		nsn2: 12909,
+		nsn3: 11113,
+	})
+	if len(hcs.services) != 3 {
+		t.Errorf("expected 3 services, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn1].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn1].endpoints)
+	}
+	if hcs.services[nsn2].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
+	}
+	if hcs.services[nsn3].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
+	}
+	if len(listener.openPorts) != 3 {
+		t.Errorf("expected 3 open ports, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
+	}
+	// test the handlers
+	testHandler(hcs, nsn1, http.StatusServiceUnavailable, 0, t)
+	testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
+	testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
+
+	// sync endpoints
+	hcs.SyncEndpoints(map[types.NamespacedName]int{
+		nsn1: 9,
+		nsn2: 3,
+		nsn3: 7,
+	})
+	if len(hcs.services) != 3 {
+		t.Errorf("expected 3 services, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn1].endpoints != 9 {
+		t.Errorf("expected 9 endpoints, got %d", hcs.services[nsn1].endpoints)
+	}
+	if hcs.services[nsn2].endpoints != 3 {
+		t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
+	}
+	if hcs.services[nsn3].endpoints != 7 {
+		t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
+	}
+	// test the handlers
+	testHandler(hcs, nsn1, http.StatusOK, 9, t)
+	testHandler(hcs, nsn2, http.StatusOK, 3, t)
+	testHandler(hcs, nsn3, http.StatusOK, 7, t)
+
+	// sync new services
+	hcs.SyncServices(map[types.NamespacedName]uint16{
+		//nsn1: 9376, // remove it
+		nsn2: 12909, // leave it
+		nsn3: 11114, // change it
+		nsn4: 11878, // add it
+	})
+	if len(hcs.services) != 3 {
+		t.Errorf("expected 3 services, got %d", len(hcs.services))
+	}
+	if hcs.services[nsn2].endpoints != 3 {
+		t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
+	}
+	if hcs.services[nsn3].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
+	}
+	if hcs.services[nsn4].endpoints != 0 {
+		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn4].endpoints)
+	}
+	// test the handlers
+	testHandler(hcs, nsn2, http.StatusOK, 3, t)
+	testHandler(hcs, nsn3, 
http.StatusServiceUnavailable, 0, t) + testHandler(hcs, nsn4, http.StatusServiceUnavailable, 0, t) + + // sync endpoints + hcs.SyncEndpoints(map[types.NamespacedName]int{ + nsn1: 9, + nsn2: 3, + nsn3: 7, + nsn4: 6, + }) + if len(hcs.services) != 3 { + t.Errorf("expected 3 services, got %d", len(hcs.services)) + } + if hcs.services[nsn2].endpoints != 3 { + t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints) + } + if hcs.services[nsn3].endpoints != 7 { + t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints) + } + if hcs.services[nsn4].endpoints != 6 { + t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints) + } + // test the handlers + testHandler(hcs, nsn2, http.StatusOK, 3, t) + testHandler(hcs, nsn3, http.StatusOK, 7, t) + testHandler(hcs, nsn4, http.StatusOK, 6, t) + + // sync endpoints, missing nsn2 + hcs.SyncEndpoints(map[types.NamespacedName]int{ + nsn3: 7, + nsn4: 6, + }) + if len(hcs.services) != 3 { + t.Errorf("expected 3 services, got %d", len(hcs.services)) + } + if hcs.services[nsn2].endpoints != 0 { + t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints) + } + if hcs.services[nsn3].endpoints != 7 { + t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints) + } + if hcs.services[nsn4].endpoints != 6 { + t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints) + } + // test the handlers + testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t) + testHandler(hcs, nsn3, http.StatusOK, 7, t) + testHandler(hcs, nsn4, http.StatusOK, 6, t) +} + +func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints int, t *testing.T) { + handler := hcs.services[nsn].server.(*fakeHTTPServer).handler + req, err := http.NewRequest("GET", "/healthz", nil) + if err != nil { + t.Fatal(err) + } + resp := httptest.NewRecorder() + + handler.ServeHTTP(resp, req) + + if resp.Code != status { + t.Errorf("expected status code %v, got %v", status, resp.Code) + } + var payload hcPayload + if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil { + t.Fatal(err) + } + if payload.Service.Name != nsn.Name || payload.Service.Namespace != nsn.Namespace { + t.Errorf("expected payload name %q, got %v", nsn.String(), payload.Service) + } + if payload.LocalEndpoints != endpoints { + t.Errorf("expected %d endpoints, got %d", endpoints, payload.LocalEndpoints) } } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/http.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/http.go deleted file mode 100644 index dd3dcf3a..00000000 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/http.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package healthcheck - -import ( - "fmt" - "net/http" - - "github.com/golang/glog" -) - -// A healthCheckHandler serves http requests on /healthz on the service health check node port, -// and responds to every request with either: -// 200 OK and the count of endpoints for the given service that are local to this node. -// or -// 503 Service Unavailable If the count is zero or the service does not exist -type healthCheckHandler struct { - svcNsName string -} - -// HTTP Utility function to send the required statusCode and error text to a http.ResponseWriter object -func sendHealthCheckResponse(rw http.ResponseWriter, statusCode int, error string) { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(statusCode) - fmt.Fprint(rw, error) -} - -// ServeHTTP: Interface callback method for net.Listener Handlers -func (h healthCheckHandler) ServeHTTP(response http.ResponseWriter, req *http.Request) { - glog.V(4).Infof("Received HC Request Service %s from Cloud Load Balancer", h.svcNsName) - healthchecker.handleHealthCheckRequest(response, h.svcNsName) -} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/listener.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/listener.go deleted file mode 100644 index d61e741c..00000000 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/listener.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package healthcheck - -// Create/Delete dynamic listeners on the required nodePorts - -import ( - "fmt" - "net" - "net/http" - - "github.com/golang/glog" -) - -// handleServiceListenerRequest: receive requests to add/remove service health check listening ports -func (h *proxyHC) handleServiceListenerRequest(req *proxyListenerRequest) bool { - sr, serviceFound := h.serviceResponderMap[req.serviceName] - if !req.add { - if !serviceFound { - return false - } - glog.Infof("Deleting HealthCheckListenPort for service %s port %d", - req.serviceName, req.listenPort) - delete(h.serviceResponderMap, req.serviceName) - (*sr.listener).Close() - return true - } else if serviceFound { - if req.listenPort == sr.listenPort { - // Addition requested but responder for service already exists and port is unchanged - return true - } - // Addition requested but responder for service already exists but the listen port has changed - glog.Infof("HealthCheckListenPort for service %s changed from %d to %d - closing old listening port", - req.serviceName, sr.listenPort, req.listenPort) - delete(h.serviceResponderMap, req.serviceName) - (*sr.listener).Close() - } - // Create a service responder object and start listening and serving on the provided port - glog.V(2).Infof("Adding health check listener for service %s on nodePort %d", req.serviceName, req.listenPort) - server := http.Server{ - Addr: fmt.Sprintf(":%d", req.listenPort), - Handler: healthCheckHandler{svcNsName: req.serviceName.String()}, - } - listener, err := net.Listen("tcp", server.Addr) - if err != nil { - glog.Warningf("FAILED to listen on address %s (%s)\n", server.Addr, err) - return false - } - h.serviceResponderMap[req.serviceName] = serviceResponder{serviceName: req.serviceName, - listenPort: req.listenPort, - listener: &listener, - server: &server} - go func() { - // Anonymous goroutine to block on Serve for this listen port - Serve will exit when the listener is closed - glog.V(3).Infof("Goroutine blocking on serving health checks for %s on port %d", req.serviceName, req.listenPort) - if err := server.Serve(listener); err != nil { - glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed with error %s\n", req.listenPort, req.serviceName, err) - return - } - glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed\n", req.listenPort, req.serviceName) - }() - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/worker.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/worker.go deleted file mode 100644 index 1c1d60a0..00000000 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/worker.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies -package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck" - -import ( - "time" - - "github.com/golang/glog" -) - -var healthchecker *proxyHC - -// handlerLoop Serializes all requests to prevent concurrent access to the maps -func (h *proxyHC) handlerLoop() { - ticker := time.NewTicker(1 * time.Minute) - defer ticker.Stop() - for { - select { - case req := <-h.mutationRequestChannel: - h.handleMutationRequest(req) - case req := <-h.listenerRequestChannel: - req.responseChannel <- h.handleServiceListenerRequest(req) - case <-ticker.C: - go h.sync() - } - } -} - -func (h *proxyHC) sync() { - glog.V(4).Infof("%d Health Check Listeners", len(h.serviceResponderMap)) - glog.V(4).Infof("%d Services registered for health checking", len(h.serviceEndpointsMap.List())) - for _, svc := range h.serviceEndpointsMap.ListKeys() { - if e, ok := h.serviceEndpointsMap.Get(svc); ok { - endpointList := e.(*serviceEndpointsList) - glog.V(4).Infof("Service %s has %d local endpoints", svc, endpointList.endpoints.Len()) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD index 4b649957..e31f28b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD @@ -50,7 +50,6 @@ go_test( "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/util/intstr", - "//vendor:k8s.io/apimachinery/pkg/util/sets", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index b2f6c0b0..7326c369 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -213,7 +213,7 @@ type Proxier struct { nodeIP net.IP portMapper portOpener recorder record.EventRecorder - healthChecker healthChecker + healthChecker healthcheck.Server } type localPort struct { @@ -245,17 +245,6 @@ func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) { return openLocalPort(lp) } -type healthChecker interface { - UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) -} - -// TODO: the healthcheck pkg should offer a type -type globalHealthChecker struct{} - -func (globalHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) { - healthcheck.UpdateEndpoints(serviceName, endpointUIDs) -} - // Proxier implements ProxyProvider var _ proxy.ProxyProvider = &Proxier{} @@ -309,8 +298,7 @@ func NewProxier(ipt utiliptables.Interface, glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } - healthChecker := globalHealthChecker{} - go healthcheck.Run() + healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps var throttle flowcontrol.RateLimiter // Defaulting back to not limit sync rate when minSyncPeriod is 0. @@ -444,18 +432,12 @@ func (proxier *Proxier) SyncLoop() { } } -type healthCheckPort struct { - namespace types.NamespacedName - nodeport int -} - // Accepts a list of Services and the existing service map. Returns the new -// service map, a list of healthcheck ports to add to or remove from the health -// checking listener service, and a set of stale UDP services. 
-func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, []healthCheckPort, []healthCheckPort, sets.String) { +// service map, a map of healthcheck ports, and a set of stale UDP +// services. +func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, map[types.NamespacedName]uint16, sets.String) { newServiceMap := make(proxyServiceMap) - healthCheckAdd := make([]healthCheckPort, 0) - healthCheckDel := make([]healthCheckPort, 0) + hcPorts := make(map[types.NamespacedName]uint16) for i := range allServices { service := &allServices[i] @@ -492,12 +474,8 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) ( glog.V(1).Infof("Updating existing service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol) } - if !exists || !equal { - if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 { - healthCheckAdd = append(healthCheckAdd, healthCheckPort{serviceName.NamespacedName, info.healthCheckNodePort}) - } else { - healthCheckDel = append(healthCheckDel, healthCheckPort{serviceName.NamespacedName, 0}) - } + if info.onlyNodeLocalEndpoints { + hcPorts[svcName] = uint16(info.healthCheckNodePort) } newServiceMap[serviceName] = info @@ -505,6 +483,13 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) ( } } + for nsn, port := range hcPorts { + if port == 0 { + glog.Errorf("Service %q has no healthcheck nodeport", nsn) + delete(hcPorts, nsn) + } + } + staleUDPServices := sets.NewString() // Remove serviceports missing from the update. for name, info := range oldServiceMap { @@ -513,13 +498,10 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) ( if info.protocol == api.ProtocolUDP { staleUDPServices.Insert(info.clusterIP.String()) } - if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 { - healthCheckDel = append(healthCheckDel, healthCheckPort{name.NamespacedName, info.healthCheckNodePort}) - } } } - return newServiceMap, healthCheckAdd, healthCheckDel, staleUDPServices + return newServiceMap, hcPorts, staleUDPServices } // OnServiceUpdate tracks the active set of service proxies. 
@@ -533,19 +515,11 @@ func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) {
 	defer proxier.mu.Unlock()
 	proxier.haveReceivedServiceUpdate = true
 
-	newServiceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap)
-	for _, hc := range hcAdd {
-		glog.V(4).Infof("Adding health check for %+v, port %v", hc.namespace, hc.nodeport)
-		// Turn on healthcheck responder to listen on the health check nodePort
-		// FIXME: handle failures from adding the service
-		healthcheck.AddServiceListener(hc.namespace, hc.nodeport)
-	}
-	for _, hc := range hcDel {
-		// Remove ServiceListener health check nodePorts from the health checker
-		// TODO - Stats
-		glog.V(4).Infof("Deleting health check for %+v, port %v", hc.namespace, hc.nodeport)
-		// FIXME: handle failures from deleting the service
-		healthcheck.DeleteServiceListener(hc.namespace, hc.nodeport)
+	newServiceMap, hcPorts, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap)
+
+	// update healthcheck ports
+	if err := proxier.healthChecker.SyncServices(hcPorts); err != nil {
+		glog.Errorf("Error syncing healthcheck ports: %v", err)
 	}
 
 	if len(newServiceMap) != len(proxier.serviceMap) || !reflect.DeepEqual(newServiceMap, proxier.serviceMap) {
@@ -568,7 +542,13 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
 	proxier.allEndpoints = allEndpoints
 
 	// TODO: once service has made this same transform, move this into proxier.syncProxyRules()
-	newMap, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname, proxier.healthChecker)
+	newMap, hcEndpoints, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname)
+
+	// update healthcheck endpoints
+	if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil {
+		glog.Errorf("Error syncing healthcheck endpoints: %v", err)
+	}
+
 	if len(newMap) != len(proxier.endpointsMap) || !reflect.DeepEqual(newMap, proxier.endpointsMap) {
 		proxier.endpointsMap = newMap
 		proxier.syncProxyRules()
@@ -580,11 +560,11 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
 }
 
 // Convert a slice of api.Endpoints objects into a map of service-port -> endpoints.
-func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string,
-	healthChecker healthChecker) (newMap proxyEndpointMap, staleSet map[endpointServicePair]bool) {
+func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string) (newMap proxyEndpointMap, hcEndpoints map[types.NamespacedName]int, staleSet map[endpointServicePair]bool) {
 
 	// return values
 	newMap = make(proxyEndpointMap)
+	hcEndpoints = make(map[types.NamespacedName]int)
 	staleSet = make(map[endpointServicePair]bool)
 
 	// Update endpoints for services. 
@@ -610,19 +590,30 @@ func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, host } } - // Update service health check - allSvcPorts := make(map[proxy.ServicePortName]bool) - for svcPort := range curMap { - allSvcPorts[svcPort] = true - } - for svcPort := range newMap { - allSvcPorts[svcPort] = true - } - for svcPort := range allSvcPorts { - updateHealthCheckEntries(svcPort.NamespacedName, newMap[svcPort], healthChecker) + if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { + return } - return newMap, staleSet + // accumulate local IPs per service, ignoring ports + localIPs := map[types.NamespacedName]sets.String{} + for svcPort := range newMap { + for _, ep := range newMap[svcPort] { + if ep.isLocal { + nsn := svcPort.NamespacedName + if localIPs[nsn] == nil { + localIPs[nsn] = sets.NewString() + } + ip := strings.Split(ep.endpoint, ":")[0] // just the IP part + localIPs[nsn].Insert(ip) + } + } + } + // produce a count per service + for nsn, ips := range localIPs { + hcEndpoints[nsn] = len(ips) + } + + return newMap, hcEndpoints, staleSet } // Gather information about all the endpoint state for a given api.Endpoints. @@ -668,23 +659,6 @@ func accumulateEndpointsMap(endpoints *api.Endpoints, hostname string, } } -// updateHealthCheckEntries - send the new set of local endpoints to the health checker -func updateHealthCheckEntries(name types.NamespacedName, endpoints []*endpointsInfo, healthChecker healthChecker) { - if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - return - } - - // Use a set instead of a slice to provide deduplication - epSet := sets.NewString() - for _, portInfo := range endpoints { - if portInfo.isLocal { - // kube-proxy health check only needs local endpoints - epSet.Insert(fmt.Sprintf("%s/%s", name.Namespace, name.Name)) - } - } - healthChecker.UpdateEndpoints(name, epSet) -} - // portProtoHash takes the ServicePortName and protocol for a service // returns the associated 16 character hash. This is computed by hashing (sha256) // then encoding to base32 and truncating to 16 chars. We do this because IPTables diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go index b3ac6390..e7e4bc3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package iptables import ( + "reflect" "strconv" "testing" @@ -29,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/proxy" @@ -355,9 +355,27 @@ func (f *fakePortOpener) OpenLocalPort(lp *localPort) (closeable, error) { return nil, nil } -type fakeHealthChecker struct{} +type fakeHealthChecker struct { + services map[types.NamespacedName]uint16 + endpoints map[types.NamespacedName]int +} -func (fakeHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) {} +func newFakeHealthChecker() *fakeHealthChecker { + return &fakeHealthChecker{ + services: map[types.NamespacedName]uint16{}, + endpoints: map[types.NamespacedName]int{}, + } +} + +func (fake *fakeHealthChecker) SyncServices(newServices map[types.NamespacedName]uint16) error { + fake.services = newServices + return nil +} + +func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error { + fake.endpoints = newEndpoints + return nil +} const testHostname = "test-hostname" @@ -374,7 +392,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { hostname: testHostname, portsMap: make(map[localPort]closeable), portMapper: &fakePortOpener{[]*localPort{}}, - healthChecker: fakeHealthChecker{}, + healthChecker: newFakeHealthChecker(), } } @@ -926,30 +944,18 @@ func TestBuildServiceMapAddRemove(t *testing.T) { }), } - serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap)) + serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap)) if len(serviceMap) != 8 { t.Errorf("expected service map length 8, got %v", serviceMap) } // The only-local-loadbalancer ones get added - if len(hcAdd) != 2 { - t.Errorf("expected healthcheck add length 2, got %v", hcAdd) + if len(hcPorts) != 1 { + t.Errorf("expected 1 healthcheck port, got %v", hcPorts) } else { - for _, hc := range hcAdd { - if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" { - t.Errorf("unexpected healthcheck listener added: %v", hc) - } - } - } - - // All the rest get deleted - if len(hcDel) != 6 { - t.Errorf("expected healthcheck del length 6, got %v", hcDel) - } else { - for _, hc := range hcDel { - if hc.namespace.Namespace == "somewhere" && hc.namespace.Name == "only-local-load-balancer" { - t.Errorf("unexpected healthcheck listener deleted: %v", hc) - } + nsn := makeNSN("somewhere", "only-local-load-balancer") + if port, found := hcPorts[nsn]; !found || port != 345 { + t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, hcPorts) } } @@ -961,27 +967,13 @@ func TestBuildServiceMapAddRemove(t *testing.T) { // Remove some stuff services = []api.Service{services[0]} services[0].Spec.Ports = []api.ServicePort{services[0].Spec.Ports[1]} - serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(services, serviceMap) + serviceMap, hcPorts, staleUDPServices = buildServiceMap(services, serviceMap) if len(serviceMap) != 1 { t.Errorf("expected service map length 1, got %v", serviceMap) } - if len(hcAdd) != 0 { - t.Errorf("expected healthcheck add length 1, got %v", hcAdd) - } - - // The only OnlyLocal annotation was removed above, so we expect a delete now. 
-	// FIXME: Since the BetaAnnotationHealthCheckNodePort is the same for all
-	// ServicePorts, we'll get one delete per ServicePort, even though they all
-	// contain the same information
-	if len(hcDel) != 2 {
-		t.Errorf("expected healthcheck del length 2, got %v", hcDel)
-	} else {
-		for _, hc := range hcDel {
-			if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" {
-				t.Errorf("unexpected healthcheck listener deleted: %v", hc)
-			}
-		}
+	if len(hcPorts) != 0 {
+		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
 	}
 
 	// All services but one were deleted. While you'd expect only the ClusterIPs
@@ -1008,17 +1000,14 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
 	}
 
 	// Headless service should be ignored
-	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
+	serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
 	if len(serviceMap) != 0 {
 		t.Errorf("expected service map length 0, got %d", len(serviceMap))
 	}
 
 	// No proxied services, so no healthchecks
-	if len(hcAdd) != 0 {
-		t.Errorf("expected healthcheck add length 0, got %d", len(hcAdd))
-	}
-	if len(hcDel) != 0 {
-		t.Errorf("expected healthcheck del length 0, got %d", len(hcDel))
+	if len(hcPorts) != 0 {
+		t.Errorf("expected healthcheck ports length 0, got %d", len(hcPorts))
 	}
 
 	if len(staleUDPServices) != 0 {
@@ -1036,16 +1025,13 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
 		}),
 	}
 
-	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
+	serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
 	if len(serviceMap) != 0 {
 		t.Errorf("expected service map length 0, got %v", serviceMap)
 	}
 	// No proxied services, so no healthchecks
-	if len(hcAdd) != 0 {
-		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
-	}
-	if len(hcDel) != 0 {
-		t.Errorf("expected healthcheck del length 0, got %v", hcDel)
+	if len(hcPorts) != 0 {
+		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
 	}
 	if len(staleUDPServices) != 0 {
 		t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices)
@@ -1081,15 +1067,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
 		}),
 	}
 
-	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(first, make(proxyServiceMap))
+	serviceMap, hcPorts, staleUDPServices := buildServiceMap(first, make(proxyServiceMap))
 	if len(serviceMap) != 2 {
 		t.Errorf("expected service map length 2, got %v", serviceMap)
 	}
-	if len(hcAdd) != 0 {
-		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
-	}
-	if len(hcDel) != 2 {
-		t.Errorf("expected healthcheck del length 2, got %v", hcDel)
+	if len(hcPorts) != 0 {
+		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
 	}
 	if len(staleUDPServices) != 0 {
 		// Services only added, so nothing stale yet
@@ -1097,15 +1080,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
 	}
 
 	// Change service to load-balancer
-	serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap)
+	serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap)
 	if len(serviceMap) != 2 {
 		t.Errorf("expected service map length 2, got %v", serviceMap)
 	}
-	if len(hcAdd) != 2 {
-		t.Errorf("expected healthcheck add length 2, got %v", hcAdd)
-	}
-	if len(hcDel) != 0 {
-		t.Errorf("expected healthcheck add length 2, got %v", hcDel)
+	if len(hcPorts) != 1 {
+		t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
 	}
 	if 
len(staleUDPServices) != 0 { t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) @@ -1113,30 +1093,24 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { // No change; make sure the service map stays the same and there are // no health-check changes - serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap) + serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap) if len(serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", serviceMap) } - if len(hcAdd) != 0 { - t.Errorf("expected healthcheck add length 0, got %v", hcAdd) - } - if len(hcDel) != 0 { - t.Errorf("expected healthcheck add length 2, got %v", hcDel) + if len(hcPorts) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", hcPorts) } if len(staleUDPServices) != 0 { t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) } // And back to ClusterIP - serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(first, serviceMap) + serviceMap, hcPorts, staleUDPServices = buildServiceMap(first, serviceMap) if len(serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", serviceMap) } - if len(hcAdd) != 0 { - t.Errorf("expected healthcheck add length 0, got %v", hcAdd) - } - if len(hcDel) != 2 { - t.Errorf("expected healthcheck del length 2, got %v", hcDel) + if len(hcPorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", hcPorts) } if len(staleUDPServices) != 0 { // Services only added, so nothing stale yet @@ -1386,28 +1360,33 @@ func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) api return ept } +func makeNSN(namespace, name string) types.NamespacedName { + return types.NamespacedName{Namespace: namespace, Name: name} +} + func makeServicePortName(ns, name, port string) proxy.ServicePortName { return proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: ns, - Name: name, - }, - Port: port, + NamespacedName: makeNSN(ns, name), + Port: port, } } func Test_updateEndpoints(t *testing.T) { + var nodeName = "host" + testCases := []struct { - newEndpoints []api.Endpoints - oldEndpoints map[proxy.ServicePortName][]*endpointsInfo - expectedResult map[proxy.ServicePortName][]*endpointsInfo - expectedStale []endpointServicePair + newEndpoints []api.Endpoints + oldEndpoints map[proxy.ServicePortName][]*endpointsInfo + expectedResult map[proxy.ServicePortName][]*endpointsInfo + expectedStale []endpointServicePair + expectedHealthchecks map[types.NamespacedName]int }{{ // Case[0]: nothing - newEndpoints: []api.Endpoints{}, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStale: []endpointServicePair{}, + newEndpoints: []api.Endpoints{}, + oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[1]: no change, unnamed port newEndpoints: []api.Endpoints{ @@ -1432,14 +1411,16 @@ func Test_updateEndpoints(t *testing.T) { {"1.1.1.1:11", false}, }, }, - expectedStale: []endpointServicePair{}, + expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { - // Case[2]: no change, named port + // Case[2]: no change, named port, local newEndpoints: []api.Endpoints{ makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { ept.Subsets = 
[]api.EndpointSubset{{ Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", + IP: "1.1.1.1", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p11", @@ -1450,15 +1431,18 @@ func Test_updateEndpoints(t *testing.T) { }, oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, }, { // Case[3]: no change, multiple subsets newEndpoints: []api.Endpoints{ @@ -1498,14 +1482,16 @@ func Test_updateEndpoints(t *testing.T) { {"1.1.1.2:12", false}, }, }, - expectedStale: []endpointServicePair{}, + expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { - // Case[4]: no change, multiple subsets, multiple ports + // Case[4]: no change, multiple subsets, multiple ports, local newEndpoints: []api.Endpoints{ makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { ept.Subsets = []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", + IP: "1.1.1.1", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p11", @@ -1527,10 +1513,10 @@ func Test_updateEndpoints(t *testing.T) { }, oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, makeServicePortName("ns1", "ep1", "p12"): { - {"1.1.1.1:12", false}, + {"1.1.1.1:12", true}, }, makeServicePortName("ns1", "ep1", "p13"): { {"1.1.1.3:13", false}, @@ -1538,16 +1524,19 @@ func Test_updateEndpoints(t *testing.T) { }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, makeServicePortName("ns1", "ep1", "p12"): { - {"1.1.1.1:12", false}, + {"1.1.1.1:12", true}, }, makeServicePortName("ns1", "ep1", "p13"): { {"1.1.1.3:13", false}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, }, { // Case[5]: no change, multiple endpoints, subsets, IPs, and ports newEndpoints: []api.Endpoints{ @@ -1556,7 +1545,8 @@ func Test_updateEndpoints(t *testing.T) { Addresses: []api.EndpointAddress{{ IP: "1.1.1.1", }, { - IP: "1.1.1.2", + IP: "1.1.1.2", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p11", @@ -1569,7 +1559,8 @@ func Test_updateEndpoints(t *testing.T) { Addresses: []api.EndpointAddress{{ IP: "1.1.1.3", }, { - IP: "1.1.1.4", + IP: "1.1.1.4", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p13", @@ -1585,7 +1576,8 @@ func Test_updateEndpoints(t *testing.T) { Addresses: []api.EndpointAddress{{ IP: "2.2.2.1", }, { - IP: "2.2.2.2", + IP: "2.2.2.2", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p21", @@ -1600,63 +1592,68 @@ func Test_updateEndpoints(t *testing.T) { oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { {"1.1.1.1:11", false}, - {"1.1.1.2:11", false}, + {"1.1.1.2:11", true}, }, makeServicePortName("ns1", "ep1", "p12"): { {"1.1.1.1:12", false}, - {"1.1.1.2:12", false}, + {"1.1.1.2:12", true}, }, makeServicePortName("ns1", "ep1", "p13"): { {"1.1.1.3:13", false}, - {"1.1.1.4:13", false}, + {"1.1.1.4:13", true}, }, 
makeServicePortName("ns1", "ep1", "p14"): { {"1.1.1.3:14", false}, - {"1.1.1.4:14", false}, + {"1.1.1.4:14", true}, }, makeServicePortName("ns2", "ep2", "p21"): { {"2.2.2.1:21", false}, - {"2.2.2.2:21", false}, + {"2.2.2.2:21", true}, }, makeServicePortName("ns2", "ep2", "p22"): { {"2.2.2.1:22", false}, - {"2.2.2.2:22", false}, + {"2.2.2.2:22", true}, }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { {"1.1.1.1:11", false}, - {"1.1.1.2:11", false}, + {"1.1.1.2:11", true}, }, makeServicePortName("ns1", "ep1", "p12"): { {"1.1.1.1:12", false}, - {"1.1.1.2:12", false}, + {"1.1.1.2:12", true}, }, makeServicePortName("ns1", "ep1", "p13"): { {"1.1.1.3:13", false}, - {"1.1.1.4:13", false}, + {"1.1.1.4:13", true}, }, makeServicePortName("ns1", "ep1", "p14"): { {"1.1.1.3:14", false}, - {"1.1.1.4:14", false}, + {"1.1.1.4:14", true}, }, makeServicePortName("ns2", "ep2", "p21"): { {"2.2.2.1:21", false}, - {"2.2.2.2:21", false}, + {"2.2.2.2:21", true}, }, makeServicePortName("ns2", "ep2", "p22"): { {"2.2.2.1:22", false}, - {"2.2.2.2:22", false}, + {"2.2.2.2:22", true}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 2, + makeNSN("ns2", "ep2"): 1, + }, }, { // Case[6]: add an Endpoints newEndpoints: []api.Endpoints{ makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { ept.Subsets = []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", + IP: "1.1.1.1", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Port: 11, @@ -1667,16 +1664,19 @@ func Test_updateEndpoints(t *testing.T) { oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ /* empty */ }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, }, { // Case[7]: remove an Endpoints newEndpoints: []api.Endpoints{ /* empty */ }, oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", false}, + {"1.1.1.1:11", true}, }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, @@ -1684,6 +1684,7 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", ""), }}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[8]: add an IP and port newEndpoints: []api.Endpoints{ @@ -1692,7 +1693,8 @@ func Test_updateEndpoints(t *testing.T) { Addresses: []api.EndpointAddress{{ IP: "1.1.1.1", }, { - IP: "1.1.1.2", + IP: "1.1.1.2", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p11", @@ -1712,14 +1714,17 @@ func Test_updateEndpoints(t *testing.T) { expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ makeServicePortName("ns1", "ep1", "p11"): { {"1.1.1.1:11", false}, - {"1.1.1.2:11", false}, + {"1.1.1.2:11", true}, }, makeServicePortName("ns1", "ep1", "p12"): { {"1.1.1.1:12", false}, - {"1.1.1.2:12", false}, + {"1.1.1.2:12", true}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, }, { // Case[9]: remove an IP and port newEndpoints: []api.Endpoints{ @@ -1760,6 +1765,7 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "1.1.1.2:12", servicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, + expectedHealthchecks: 
map[types.NamespacedName]int{}, }, { // Case[10]: add a subset newEndpoints: []api.Endpoints{ @@ -1774,7 +1780,8 @@ func Test_updateEndpoints(t *testing.T) { }}, }, { Addresses: []api.EndpointAddress{{ - IP: "2.2.2.2", + IP: "2.2.2.2", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p22", @@ -1793,10 +1800,13 @@ func Test_updateEndpoints(t *testing.T) { {"1.1.1.1:11", false}, }, makeServicePortName("ns1", "ep1", "p22"): { - {"2.2.2.2:22", false}, + {"2.2.2.2:22", true}, }, }, expectedStale: []endpointServicePair{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, }, { // Case[11]: remove a subset newEndpoints: []api.Endpoints{ @@ -1829,6 +1839,7 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "2.2.2.2:22", servicePortName: makeServicePortName("ns1", "ep1", "p22"), }}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[12]: rename a port newEndpoints: []api.Endpoints{ @@ -1858,6 +1869,7 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[13]: renumber a port newEndpoints: []api.Endpoints{ @@ -1887,6 +1899,7 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "1.1.1.1:11", servicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, + expectedHealthchecks: map[types.NamespacedName]int{}, }, { // Case[14]: complex add and remove newEndpoints: []api.Endpoints{ @@ -1928,7 +1941,8 @@ func Test_updateEndpoints(t *testing.T) { makeTestEndpoints("ns4", "ep4", func(ept *api.Endpoints) { ept.Subsets = []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{ - IP: "4.4.4.4", + IP: "4.4.4.4", + NodeName: &nodeName, }}, Ports: []api.EndpointPort{{ Name: "p44", @@ -1942,18 +1956,18 @@ func Test_updateEndpoints(t *testing.T) { {"1.1.1.1:11", false}, }, makeServicePortName("ns2", "ep2", "p22"): { - {"2.2.2.2:22", false}, - {"2.2.2.22:22", false}, + {"2.2.2.2:22", true}, + {"2.2.2.22:22", true}, }, makeServicePortName("ns2", "ep2", "p23"): { - {"2.2.2.3:23", false}, + {"2.2.2.3:23", true}, }, makeServicePortName("ns4", "ep4", "p44"): { - {"4.4.4.4:44", false}, - {"4.4.4.5:44", false}, + {"4.4.4.4:44", true}, + {"4.4.4.5:44", true}, }, makeServicePortName("ns4", "ep4", "p45"): { - {"4.4.4.6:45", false}, + {"4.4.4.6:45", true}, }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ @@ -1971,7 +1985,7 @@ func Test_updateEndpoints(t *testing.T) { {"3.3.3.3:33", false}, }, makeServicePortName("ns4", "ep4", "p44"): { - {"4.4.4.4:44", false}, + {"4.4.4.4:44", true}, }, }, expectedStale: []endpointServicePair{{ @@ -1990,10 +2004,13 @@ func Test_updateEndpoints(t *testing.T) { endpoint: "4.4.4.6:45", servicePortName: makeServicePortName("ns4", "ep4", "p45"), }}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns4", "ep4"): 1, + }, }} for tci, tc := range testCases { - newMap, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, "host", fakeHealthChecker{}) + newMap, hcEndpoints, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, nodeName) if len(newMap) != len(tc.expectedResult) { t.Errorf("[%d] expected %d results, got %d: %v", tci, len(tc.expectedResult), len(newMap), newMap) } @@ -2016,6 +2033,9 @@ func Test_updateEndpoints(t *testing.T) { t.Errorf("[%d] expected stale[%v], but didn't find it: %v", tci, x, stale) } } + if !reflect.DeepEqual(hcEndpoints, tc.expectedHealthchecks) { + t.Errorf("[%d] expected healthchecks %v, got %v", tci, 
tc.expectedHealthchecks, hcEndpoints) + } } } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go index 6ac39c2c..fdddc867 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go @@ -55,6 +55,8 @@ import ( "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" ) +const PostStartHookName = "rbac/bootstrap-roles" + type RESTStorageProvider struct { Authorizer authorizer.Authorizer } @@ -123,7 +125,7 @@ func (p RESTStorageProvider) storage(version schema.GroupVersion, apiResourceCon } func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) { - return "rbac/bootstrap-roles", PostStartHook, nil + return PostStartHookName, PostStartHook, nil } func PostStartHook(hookContext genericapiserver.PostStartHookContext) error { diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index fbd95d99..39438e88 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.6.1+$Format:%h$" + gitVersion string = "v1.6.4+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go index 606c57b1..c3d1d028 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go @@ -151,7 +151,7 @@ func (plugin *photonPersistentDiskPlugin) ConstructVolumeSpec(volumeSpecName, mo // Abstract interface to disk operations. type pdManager interface { // Creates a volume - CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error) + CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error) // Deletes a volume DeleteVolume(deleter *photonPersistentDiskDeleter) error } @@ -342,11 +342,15 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume. 
} func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - pdID, sizeGB, err := p.manager.CreateVolume(p) + pdID, sizeGB, fstype, err := p.manager.CreateVolume(p) if err != nil { return nil, err } + if fstype == "" { + fstype = "ext4" + } + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: p.options.PVName, @@ -364,7 +368,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, err PersistentVolumeSource: v1.PersistentVolumeSource{ PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{ PdID: pdID, - FSType: "ext4", + FSType: fstype, }, }, }, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go index 2d843542..3484aa3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go @@ -88,8 +88,8 @@ func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAcc type fakePDManager struct { } -func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error) { - return "test-photon-pd-id", 10, nil +func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error) { + return "test-photon-pd-id", 10, "ext4", nil } func (fake *fakePDManager) DeleteVolume(cd *photonPersistentDiskDeleter) error { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go index f3c19366..1963f37b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go @@ -80,11 +80,11 @@ func verifyDevicePath(path string) (string, error) { } // CreateVolume creates a PhotonController persistent disk. -func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, err error) { +func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, fstype string, err error) { cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider()) if err != nil { glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err) - return "", 0, err + return "", 0, "", err } capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] @@ -102,20 +102,23 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd switch strings.ToLower(parameter) { case "flavor": volumeOptions.Flavor = value + case "fstype": + fstype = value + glog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype) default: glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) - return "", 0, fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) + return "", 0, "", fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) } } pdID, err = cloud.CreateDisk(volumeOptions) if err != nil { glog.Errorf("Photon Controller Util: failed to CreateDisk. 
Error [%v]", err) - return "", 0, err + return "", 0, "", err } glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) - return pdID, volSizeGB, nil + return pdID, volSizeGB, "", nil } // DeleteVolume deletes a vSphere volume. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 035b2f06..187ccc98 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -152,10 +152,9 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - // TODO: replace with a real attribute after beta - className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"] - if !found { - return nil, fmt.Errorf("Volume has no class annotation") + className := v1.GetPersistentVolumeClass(pv) + if className == "" { + return nil, fmt.Errorf("Volume has no storage class") } class, err := kubeClient.StorageV1beta1().StorageClasses().Get(className, metav1.GetOptions{}) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index ac72babe..4f8df5b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -152,7 +152,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str // Abstract interface to disk operations. type vdManager interface { // Creates a volume - CreateVolume(provisioner *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeGB int, err error) + CreateVolume(provisioner *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeGB int, fstype string, err error) // Deletes a volume DeleteVolume(deleter *vsphereVolumeDeleter) error } @@ -188,6 +188,7 @@ type vsphereVolumeMounter struct { func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes { return volume.Attributes{ SupportsSELinux: true, + Managed: true, } } @@ -343,11 +344,15 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO } func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - vmDiskPath, sizeKB, err := v.manager.CreateVolume(v) + vmDiskPath, sizeKB, fstype, err := v.manager.CreateVolume(v) if err != nil { return nil, err } + if fstype == "" { + fstype = "ext4" + } + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: v.options.PVName, @@ -365,7 +370,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { PersistentVolumeSource: v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ VolumePath: vmDiskPath, - FSType: "ext4", + FSType: fstype, }, }, }, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go index a0b19ee2..6608c513 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go @@ -63,8 +63,8 @@ func getFakeDeviceName(host volume.VolumeHost, volPath string) string { return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath) } -func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) { - return "[local] test-volume-name.vmdk", 100, nil 
+func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) { + return "[local] test-volume-name.vmdk", 100, "ext4", nil } func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go index c7484fe8..5ceeab76 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -19,6 +19,7 @@ package vsphere_volume import ( "errors" "fmt" + "strconv" "strings" "time" @@ -35,6 +36,27 @@ const ( checkSleepDuration = time.Second diskByIDPath = "/dev/disk/by-id/" diskSCSIPrefix = "wwn-0x" + Fstype = "fstype" + diskformat = "diskformat" + datastore = "datastore" + + HostFailuresToTolerateCapability = "hostfailurestotolerate" + ForceProvisioningCapability = "forceprovisioning" + CacheReservationCapability = "cachereservation" + DiskStripesCapability = "diskstripes" + ObjectSpaceReservationCapability = "objectspacereservation" + IopsLimitCapability = "iopslimit" + HostFailuresToTolerateCapabilityMin = 0 + HostFailuresToTolerateCapabilityMax = 3 + ForceProvisioningCapabilityMin = 0 + ForceProvisioningCapabilityMax = 1 + CacheReservationCapabilityMin = 0 + CacheReservationCapabilityMax = 100 + DiskStripesCapabilityMin = 1 + DiskStripesCapabilityMax = 12 + ObjectSpaceReservationCapabilityMin = 0 + ObjectSpaceReservationCapabilityMax = 100 + IopsLimitCapabilityMin = 0 ) var ErrProbeVolume = errors.New("Error scanning attached volumes") @@ -52,10 +74,11 @@ func verifyDevicePath(path string) (string, error) { } // CreateVolume creates a vSphere volume. -func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) { +func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) { + cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider()) if err != nil { - return "", 0, err + return "", 0, "", err } capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] @@ -73,27 +96,43 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa // the values to the cloud provider. 
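	// Editor's illustration only, not part of the vendored change: a hypothetical
	// StorageClass parameters map of the shape this loop consumes. The lowercased
	// "fstype" key and the VSAN capability keys are the new cases added below;
	// anything unrecognized still falls through to the default case and fails provisioning.
	exampleParameters := map[string]string{
		"diskformat":             "thin",
		"fstype":                 "xfs",
		"hostfailurestotolerate": "1",
	}
	_ = exampleParameters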
for parameter, value := range v.options.Parameters { switch strings.ToLower(parameter) { - case "diskformat": + case diskformat: volumeOptions.DiskFormat = value - case "datastore": + case datastore: volumeOptions.Datastore = value + case Fstype: + fstype = value + glog.V(4).Infof("Setting fstype as %q", fstype) + case HostFailuresToTolerateCapability, ForceProvisioningCapability, + CacheReservationCapability, DiskStripesCapability, + ObjectSpaceReservationCapability, IopsLimitCapability: + capabilityData, err := validateVSANCapability(strings.ToLower(parameter), value) + if err != nil { + return "", 0, "", err + } else { + volumeOptions.StorageProfileData += capabilityData + } default: - return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName()) + return "", 0, "", fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName()) } } + if volumeOptions.StorageProfileData != "" { + volumeOptions.StorageProfileData = "(" + volumeOptions.StorageProfileData + ")" + } + glog.V(1).Infof("StorageProfileData in vsphere volume %q", volumeOptions.StorageProfileData) // TODO: implement PVC.Selector parsing if v.options.PVC.Spec.Selector != nil { - return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") + return "", 0, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") } vmDiskPath, err = cloud.CreateVolume(volumeOptions) if err != nil { glog.V(2).Infof("Error creating vsphere volume: %v", err) - return "", 0, err + return "", 0, "", err } glog.V(2).Infof("Successfully created vsphere volume %s", name) - return vmDiskPath, volSizeKB, nil + return vmDiskPath, volSizeKB, fstype, nil } // DeleteVolume deletes a vSphere volume. @@ -132,3 +171,71 @@ func getCloudProvider(cloud cloudprovider.Interface) (*vsphere.VSphere, error) { } return vs, nil } + +// Validate the capability requirement for the user specified policy attributes. +func validateVSANCapability(capabilityName string, capabilityValue string) (string, error) { + var capabilityData string + capabilityIntVal, ok := verifyCapabilityValueIsInteger(capabilityValue) + if !ok { + return "", fmt.Errorf("Invalid value for %s. The capabilityValue: %s must be a valid integer value", capabilityName, capabilityValue) + } + switch strings.ToLower(capabilityName) { + case HostFailuresToTolerateCapability: + if capabilityIntVal >= HostFailuresToTolerateCapabilityMin && capabilityIntVal <= HostFailuresToTolerateCapabilityMax { + capabilityData = " (\"hostFailuresToTolerate\" i" + capabilityValue + ")" + } else { + return "", fmt.Errorf(`Invalid value for hostFailuresToTolerate. + The default value is %d, minimum value is %d and maximum value is %d.`, + 1, HostFailuresToTolerateCapabilityMin, HostFailuresToTolerateCapabilityMax) + } + case ForceProvisioningCapability: + if capabilityIntVal >= ForceProvisioningCapabilityMin && capabilityIntVal <= ForceProvisioningCapabilityMax { + capabilityData = " (\"forceProvisioning\" i" + capabilityValue + ")" + } else { + return "", fmt.Errorf(`Invalid value for forceProvisioning. 
+ The value can be either %d or %d.`, + ForceProvisioningCapabilityMin, ForceProvisioningCapabilityMax) + } + case CacheReservationCapability: + if capabilityIntVal >= CacheReservationCapabilityMin && capabilityIntVal <= CacheReservationCapabilityMax { + capabilityData = " (\"cacheReservation\" i" + strconv.Itoa(capabilityIntVal*10000) + ")" + } else { + return "", fmt.Errorf(`Invalid value for cacheReservation. + The minimum percentage is %d and maximum percentage is %d.`, + CacheReservationCapabilityMin, CacheReservationCapabilityMax) + } + case DiskStripesCapability: + if capabilityIntVal >= DiskStripesCapabilityMin && capabilityIntVal <= DiskStripesCapabilityMax { + capabilityData = " (\"stripeWidth\" i" + capabilityValue + ")" + } else { + return "", fmt.Errorf(`Invalid value for diskStripes. + The minimum value is %d and maximum value is %d.`, + DiskStripesCapabilityMin, DiskStripesCapabilityMax) + } + case ObjectSpaceReservationCapability: + if capabilityIntVal >= ObjectSpaceReservationCapabilityMin && capabilityIntVal <= ObjectSpaceReservationCapabilityMax { + capabilityData = " (\"proportionalCapacity\" i" + capabilityValue + ")" + } else { + return "", fmt.Errorf(`Invalid value for ObjectSpaceReservation. + The minimum percentage is %d and maximum percentage is %d.`, + ObjectSpaceReservationCapabilityMin, ObjectSpaceReservationCapabilityMax) + } + case IopsLimitCapability: + if capabilityIntVal >= IopsLimitCapabilityMin { + capabilityData = " (\"iopsLimit\" i" + capabilityValue + ")" + } else { + return "", fmt.Errorf(`Invalid value for iopsLimit. + The value should be greater than %d.`, IopsLimitCapabilityMin) + } + } + return capabilityData, nil +} + +// Verify if the capability value is of type integer. +func verifyCapabilityValueIsInteger(capabilityValue string) (int, bool) { + i, err := strconv.Atoi(capabilityValue) + if err != nil { + return -1, false + } + return i, true +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD index 704725c4..2877fd2d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD @@ -43,12 +43,14 @@ go_test( "//pkg/api:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", + "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/types:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apiserver/pkg/admission", + "//vendor:k8s.io/client-go/tools/cache", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go index 0be83188..f45586c7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go @@ -287,18 +287,20 @@ func (s *serviceAccount) getReferencedServiceAccountToken(serviceAccount *api.Se // getServiceAccountTokens returns all ServiceAccountToken secrets for the given ServiceAccount func (s *serviceAccount) getServiceAccountTokens(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) { - tokens, err := 
s.secretLister.Secrets(serviceAccount.Namespace).List(labels.Everything()) + secrets, err := s.secretLister.Secrets(serviceAccount.Namespace).List(labels.Everything()) if err != nil { return nil, err } - for _, token := range tokens { - if token.Type != api.SecretTypeServiceAccountToken { + tokens := []*api.Secret{} + + for _, secret := range secrets { + if secret.Type != api.SecretTypeServiceAccountToken { continue } - if serviceaccount.InternalIsServiceAccountToken(token, serviceAccount) { - tokens = append(tokens, token) + if serviceaccount.InternalIsServiceAccountToken(secret, serviceAccount) { + tokens = append(tokens, secret) } } return tokens, nil diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go index be2ff21c..f5b86315 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go @@ -25,9 +25,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" "k8s.io/kubernetes/pkg/controller" kubelet "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -849,3 +851,59 @@ func TestMultipleReferencedSecrets(t *testing.T) { t.Errorf("expected first referenced secret to be mounted, got %q", name) } } + +func newSecret(secretType api.SecretType, namespace, name, serviceAccountName, serviceAccountUID string) *api.Secret { + return &api.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Annotations: map[string]string{ + api.ServiceAccountNameKey: serviceAccountName, + api.ServiceAccountUIDKey: serviceAccountUID, + }, + }, + Type: secretType, + } +} + +func TestGetServiceAccountTokens(t *testing.T) { + admit := NewServiceAccount() + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + admit.secretLister = corelisters.NewSecretLister(indexer) + + ns := "namespace" + serviceAccountUID := "12345" + + sa := &api.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultServiceAccountName, + Namespace: ns, + UID: types.UID(serviceAccountUID), + }, + } + + nonSATokenSecret := newSecret(api.SecretTypeDockercfg, ns, "nonSATokenSecret", DefaultServiceAccountName, serviceAccountUID) + indexer.Add(nonSATokenSecret) + + differentSAToken := newSecret(api.SecretTypeServiceAccountToken, ns, "differentSAToken", "someOtherSA", "someOtherUID") + indexer.Add(differentSAToken) + + matchingSAToken := newSecret(api.SecretTypeServiceAccountToken, ns, "matchingSAToken", DefaultServiceAccountName, serviceAccountUID) + indexer.Add(matchingSAToken) + + tokens, err := admit.getServiceAccountTokens(sa) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if len(tokens) != 1 { + names := make([]string, 0, len(tokens)) + for _, token := range tokens { + names = append(names, token.Name) + } + t.Fatalf("expected only 1 token, got %v", names) + } + if e, a := matchingSAToken.Name, tokens[0].Name; e != a { + t.Errorf("expected token %s, got %s", e, a) + } +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go 
b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 2ab19875..4343ec52 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -300,8 +300,8 @@ func ClusterRoles() []rbac.ClusterRole { eventsRule(), rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), rbac.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "serviceaccounts").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("endpoints", "serviceaccounts").RuleOrDie(), + rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(), + rbac.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), // Needed to check API access. These creates are non-mutating rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 53193e40..88feeb64 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -460,6 +460,7 @@ items: resources: - endpoints - namespaces + - secrets - serviceaccounts verbs: - get @@ -467,6 +468,7 @@ items: - "" resources: - endpoints + - secrets - serviceaccounts verbs: - update diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go index d255374d..6b659bb7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -1074,14 +1074,13 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler continue } for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { catchError(err) return } - match := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) - if match { + if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { nodeResult = append(nodeResult, matchingPodAntiAffinityTerm{term: &term, node: node}) } } @@ -1109,8 +1108,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods [ if err != nil { return nil, err } - match := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) - if match { + if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { result = append(result, matchingPodAntiAffinityTerm{term: &term, node: existingPodNode}) } } @@ -1128,17 +1126,17 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta } else { allPods, err := c.podLister.List(labels.Everything()) if err != nil { - 
glog.V(10).Infof("Failed to get all pods, %+v", err) + glog.Errorf("Failed to get all pods, %+v", err) return false } if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, allPods); err != nil { - glog.V(10).Infof("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) + glog.Errorf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) return false } } for _, term := range matchingTerms { if len(term.term.TopologyKey) == 0 { - glog.V(10).Infof("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") + glog.Error("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") return false } if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) { @@ -1167,7 +1165,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node for _, term := range getPodAffinityTerms(affinity.PodAffinity) { termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, allPods, node, &term) if err != nil { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAffinityTerm %v, err: %v", + glog.Errorf("Cannot schedule pod %+v onto node %v,because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) return false } @@ -1183,7 +1181,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { - glog.V(10).Infof("Cannot parse selector on term %v for pod %v. Details %v", + glog.Errorf("Cannot parse selector on term %v for pod %v. Details %v", term, podName(pod), err) return false } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go index 591884e5..9261290a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -86,6 +86,9 @@ func toYAML(v interface{}) (string, error) { // different values in any key. All keys are required to be strings. Since patches of the // same Type have congruent keys, this is valid for multiple patch types. This method // supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: @@ -94,9 +97,11 @@ func HasConflicts(left, right interface{}) (bool, error) { for key, leftValue := range typedLeft { rightValue, ok := typedRight[key] if !ok { - return false, nil + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err } - return HasConflicts(leftValue, rightValue) } return false, nil @@ -111,7 +116,9 @@ func HasConflicts(left, right interface{}) (bool, error) { } for i := range typedLeft { - return HasConflicts(typedLeft[i], typedRight[i]) + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } } return false, nil diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go index 9e761c00..1b37e3ef 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go @@ -17,6 +17,7 @@ limitations under the License. package mergepatch import ( + "fmt" "testing" ) @@ -26,28 +27,29 @@ func TestHasConflicts(t *testing.T) { B interface{} Ret bool }{ - {A: "hello", B: "hello", Ret: false}, // 0 + {A: "hello", B: "hello", Ret: false}, {A: "hello", B: "hell", Ret: true}, {A: "hello", B: nil, Ret: true}, {A: "hello", B: 1, Ret: true}, {A: "hello", B: float64(1.0), Ret: true}, {A: "hello", B: false, Ret: true}, {A: 1, B: 1, Ret: false}, + {A: nil, B: nil, Ret: false}, {A: false, B: false, Ret: false}, {A: float64(3), B: float64(3), Ret: false}, - {A: "hello", B: []interface{}{}, Ret: true}, // 6 + {A: "hello", B: []interface{}{}, Ret: true}, {A: []interface{}{1}, B: []interface{}{}, Ret: true}, {A: []interface{}{}, B: []interface{}{}, Ret: false}, {A: []interface{}{1}, B: []interface{}{1}, Ret: false}, {A: map[string]interface{}{}, B: []interface{}{1}, Ret: true}, - {A: map[string]interface{}{}, B: map[string]interface{}{"a": 1}, Ret: false}, // 11 + {A: map[string]interface{}{}, B: map[string]interface{}{"a": 1}, Ret: false}, {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"a": 1}, Ret: false}, {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"a": 2}, Ret: true}, {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"b": 2}, Ret: false}, - { // 15 + { A: map[string]interface{}{"a": []interface{}{1}}, B: map[string]interface{}{"a": []interface{}{1}}, Ret: false, @@ -62,23 +64,75 @@ func TestHasConflicts(t *testing.T) { B: map[string]interface{}{"a": 1}, Ret: true, }, + + // Maps and lists with multiple entries. 
+ { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 0}, + Ret: true, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 2}, + Ret: false, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 0, "c": 3}, + Ret: true, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 2, "c": 3}, + Ret: false, + }, + { + A: map[string]interface{}{"a": []interface{}{1, 2}}, + B: map[string]interface{}{"a": []interface{}{1, 0}}, + Ret: true, + }, + { + A: map[string]interface{}{"a": []interface{}{1, 2}}, + B: map[string]interface{}{"a": []interface{}{1, 2}}, + Ret: false, + }, + + // Numeric types are not interchangeable. + // Callers are expected to ensure numeric types are consistent in 'left' and 'right'. + {A: int(0), B: int64(0), Ret: true}, + {A: int(0), B: float64(0), Ret: true}, + {A: int64(0), B: float64(0), Ret: true}, + // Other types are not interchangeable. + {A: int(0), B: "0", Ret: true}, + {A: int(0), B: nil, Ret: true}, + {A: int(0), B: false, Ret: true}, + {A: "true", B: true, Ret: true}, + {A: "null", B: nil, Ret: true}, } - for i, testCase := range testCases { - out, err := HasConflicts(testCase.A, testCase.B) - if err != nil { - t.Errorf("%d: unexpected error: %v", i, err) - } - if out != testCase.Ret { - t.Errorf("%d: expected %t got %t", i, testCase.Ret, out) - continue - } - out, err = HasConflicts(testCase.B, testCase.A) - if err != nil { - t.Errorf("%d: unexpected error: %v", i, err) - } - if out != testCase.Ret { - t.Errorf("%d: expected reversed %t got %t", i, testCase.Ret, out) + for _, testCase := range testCases { + testStr := fmt.Sprintf("A = %#v, B = %#v", testCase.A, testCase.B) + // Run each test case multiple times if it passes because HasConflicts() + // uses map iteration, which returns keys in nondeterministic order. + for try := 0; try < 10; try++ { + out, err := HasConflicts(testCase.A, testCase.B) + if err != nil { + t.Errorf("%v: unexpected error: %v", testStr, err) + break + } + if out != testCase.Ret { + t.Errorf("%v: expected %t got %t", testStr, testCase.Ret, out) + break + } + out, err = HasConflicts(testCase.B, testCase.A) + if err != nil { + t.Errorf("%v: unexpected error: %v", testStr, err) + break + } + if out != testCase.Ret { + t.Errorf("%v: expected reversed %t got %t", testStr, testCase.Ret, out) + break + } } } } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 2eb67af4..2be95612 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -476,6 +476,7 @@ func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte // StrategicMergePatch applies a strategic merge patch. The original and patch documents // must be JSONMap. A patch can be created from an original and modified document by // calling CreateTwoWayMergeMapPatch. +// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. 
func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { t, err := getTagStructType(dataStruct) if err != nil { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index c8b88907..48c20e5e 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -86,21 +86,21 @@ func strategicPatchObject( patchJS []byte, objToUpdate runtime.Object, versionedObj runtime.Object, -) (originalObjMap map[string]interface{}, patchMap map[string]interface{}, retErr error) { - originalObjMap = make(map[string]interface{}) +) error { + originalObjMap := make(map[string]interface{}) if err := unstructured.DefaultConverter.ToUnstructured(originalObject, &originalObjMap); err != nil { - return nil, nil, err + return err } - patchMap = make(map[string]interface{}) + patchMap := make(map[string]interface{}) if err := json.Unmarshal(patchJS, &patchMap); err != nil { - return nil, nil, err + return err } if err := applyPatchToObject(codec, defaulter, originalObjMap, patchMap, objToUpdate, versionedObj); err != nil { - return nil, nil, err + return err } - return + return nil } // applyPatchToObject applies a strategic merge patch of to diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index c768e8a4..f47a164f 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -18,7 +18,6 @@ package handlers import ( "encoding/hex" - "encoding/json" "fmt" "io/ioutil" "math/rand" @@ -39,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -104,8 +104,8 @@ func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Reque // may be used to deserialize an options object to pass to the getter. type getterFunc func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error) -// maxRetryWhenPatchConflicts is the maximum number of conflicts retry during a patch operation before returning failure -const maxRetryWhenPatchConflicts = 5 +// MaxRetryWhenPatchConflicts is the maximum number of conflicts retry during a patch operation before returning failure +const MaxRetryWhenPatchConflicts = 5 // getResourceHandler is an HTTP handler function for get requests. It delegates to the // passed-in getterFunc to perform the actual get. 
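The next hunk reworks patchResource so the original strategic patch map is rebuilt on every conflict retry (via a getOriginalPatchMap closure) instead of being cached, because applying the patch mutates the decoded map. A minimal, hypothetical sketch of that decode-a-fresh-copy-per-retry pattern, using plain encoding/json and invented names rather than the apiserver's actual types:

package main

import (
	"encoding/json"
	"fmt"
)

// decodePatch returns a freshly decoded copy of the raw patch. This mirrors the
// idea behind getOriginalPatchMap: applying a strategic merge patch mutates the
// map it was given, so each retry must start from a clean copy.
func decodePatch(patchJS []byte) (map[string]interface{}, error) {
	m := map[string]interface{}{}
	if err := json.Unmarshal(patchJS, &m); err != nil {
		return nil, err
	}
	return m, nil
}

func main() {
	patchJS := []byte(`{"spec":{"activeDeadlineSeconds":30}}`)
	for attempt := 1; attempt <= 3; attempt++ {
		patchMap, err := decodePatch(patchJS)
		if err != nil {
			panic(err)
		}
		// Stand-in for the apply step, which consumes and mutates the map.
		delete(patchMap, "spec")
		fmt.Printf("attempt %d applied a fresh copy (remaining keys: %d)\n", attempt, len(patchMap))
	}
}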
@@ -570,7 +570,7 @@ func patchResource( originalObjJS []byte originalPatchedObjJS []byte originalObjMap map[string]interface{} - originalPatchMap map[string]interface{} + getOriginalPatchMap func() (map[string]interface{}, error) lastConflictErr error originalResourceVersion string ) @@ -610,6 +610,26 @@ func patchResource( return nil, err } originalObjJS, originalPatchedObjJS = originalJS, patchedJS + + // Make a getter that can return a fresh strategic patch map if needed for conflict retries + // We have to rebuild it each time we need it, because the map gets mutated when being applied + var originalPatchBytes []byte + getOriginalPatchMap = func() (map[string]interface{}, error) { + if originalPatchBytes == nil { + // Compute once + originalPatchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj) + if err != nil { + return nil, err + } + } + // Return a fresh map every time + originalPatchMap := make(map[string]interface{}) + if err := json.Unmarshal(originalPatchBytes, &originalPatchMap); err != nil { + return nil, err + } + return originalPatchMap, nil + } + case types.StrategicMergePatchType: // Since the patch is applied on versioned objects, we need to convert the // current object to versioned representation first. @@ -621,8 +641,12 @@ func patchResource( if err != nil { return nil, err } - originalMap, patchMap, err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj) - if err != nil { + // Capture the original object map and patch for possible retries. + originalMap := make(map[string]interface{}) + if err := unstructured.DefaultConverter.ToUnstructured(currentVersionedObject, &originalMap); err != nil { + return nil, err + } + if err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj); err != nil { return nil, err } // Convert the object back to unversioned. @@ -632,8 +656,17 @@ func patchResource( return nil, err } objToUpdate = unversionedObjToUpdate - // Store unstructured representations for possible retries. - originalObjMap, originalPatchMap = originalMap, patchMap + // Store unstructured representation for possible retries. + originalObjMap = originalMap + // Make a getter that can return a fresh strategic patch map if needed for conflict retries + // We have to rebuild it each time we need it, because the map gets mutated when being applied + getOriginalPatchMap = func() (map[string]interface{}, error) { + patchMap := make(map[string]interface{}) + if err := json.Unmarshal(patchJS, &patchMap); err != nil { + return nil, err + } + return patchMap, nil + } } if err := checkName(objToUpdate, name, namespace, namer); err != nil { return nil, err @@ -669,17 +702,6 @@ func patchResource( return nil, err } } else { - if originalPatchMap == nil { - // Compute original patch, if we already didn't do this in previous retries. - originalPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj) - if err != nil { - return nil, err - } - originalPatchMap = make(map[string]interface{}) - if err := json.Unmarshal(originalPatch, &originalPatchMap); err != nil { - return nil, err - } - } // Compute current patch. 
currentObjJS, err := runtime.Encode(codec, currentObject) if err != nil { @@ -695,6 +717,12 @@ func patchResource( } } + // Get a fresh copy of the original strategic patch each time through, since applying it mutates the map + originalPatchMap, err := getOriginalPatchMap() + if err != nil { + return nil, err + } + hasConflicts, err := mergepatch.HasConflicts(originalPatchMap, currentPatchMap) if err != nil { return nil, err @@ -742,7 +770,7 @@ func patchResource( return finishRequest(timeout, func() (runtime.Object, error) { updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo) - for i := 0; i < maxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ { + for i := 0; i < MaxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ { lastConflictErr = updateErr updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go index bc0d2c3d..57e4ca36 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go @@ -74,7 +74,7 @@ func TestPatchAnonymousField(t *testing.T) { } actual := &testPatchType{} - _, _, err := strategicPatchObject(codec, defaulter, original, []byte(patch), actual, &testPatchType{}) + err := strategicPatchObject(codec, defaulter, original, []byte(patch), actual, &testPatchType{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -193,11 +193,6 @@ func (tc *patchTestCase) Run(t *testing.T) { } } - testPatcher := &testPatcher{} - testPatcher.t = t - testPatcher.startingPod = tc.startingPod - testPatcher.updatePod = tc.updatePod - ctx := request.NewDefaultContext() ctx = request.WithNamespace(ctx, namespace) @@ -211,6 +206,13 @@ func (tc *patchTestCase) Run(t *testing.T) { versionedObj := &v1.Pod{} for _, patchType := range []types.PatchType{types.JSONPatchType, types.MergePatchType, types.StrategicMergePatchType} { + // This needs to be reset on each iteration. + testPatcher := &testPatcher{ + t: t, + startingPod: tc.startingPod, + updatePod: tc.updatePod, + } + // TODO SUPPORT THIS! 
if patchType == types.JSONPatchType { continue @@ -220,12 +222,12 @@ func (tc *patchTestCase) Run(t *testing.T) { originalObjJS, err := runtime.Encode(codec, tc.startingPod) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } changedJS, err := runtime.Encode(codec, tc.changedPod) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } patch := []byte{} @@ -237,14 +239,14 @@ func (tc *patchTestCase) Run(t *testing.T) { patch, err = strategicpatch.CreateTwoWayMergePatch(originalObjJS, changedJS, versionedObj) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } case types.MergePatchType: patch, err = jsonpatch.CreateMergePatch(originalObjJS, changedJS) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } } @@ -253,12 +255,12 @@ func (tc *patchTestCase) Run(t *testing.T) { if len(tc.expectedError) != 0 { if err == nil || err.Error() != tc.expectedError { t.Errorf("%s: expected error %v, but got %v", tc.name, tc.expectedError, err) - return + continue } } else { if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } } @@ -266,7 +268,7 @@ func (tc *patchTestCase) Run(t *testing.T) { if resultObj != nil { t.Errorf("%s: unexpected result: %v", tc.name, resultObj) } - return + continue } resultPod := resultObj.(*api.Pod) @@ -275,18 +277,18 @@ func (tc *patchTestCase) Run(t *testing.T) { expectedJS, err := runtime.Encode(codec, tc.expectedPod) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } expectedObj, err := runtime.Decode(codec, expectedJS) if err != nil { t.Errorf("%s: unexpected error: %v", tc.name, err) - return + continue } reallyExpectedPod := expectedObj.(*api.Pod) if !reflect.DeepEqual(*reallyExpectedPod, *resultPod) { t.Errorf("%s mismatch: %v\n", tc.name, diff.ObjectGoPrintDiff(reallyExpectedPod, resultPod)) - return + continue } } @@ -314,7 +316,7 @@ func TestNumberConversion(t *testing.T) { patchJS := []byte(`{"spec":{"ports":[{"port":80,"nodePort":31789}]}}`) - _, _, err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj) + err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj) if err != nil { t.Fatal(err) } @@ -324,6 +326,59 @@ func TestNumberConversion(t *testing.T) { } } +func TestPatchResourceNumberConversion(t *testing.T) { + namespace := "bar" + name := "foo" + uid := types.UID("uid") + fifteen := int64(15) + thirty := int64(30) + + tc := &patchTestCase{ + name: "TestPatchResourceNumberConversion", + + startingPod: &api.Pod{}, + changedPod: &api.Pod{}, + updatePod: &api.Pod{}, + + expectedPod: &api.Pod{}, + } + + tc.startingPod.Name = name + tc.startingPod.Namespace = namespace + tc.startingPod.UID = uid + tc.startingPod.ResourceVersion = "1" + tc.startingPod.APIVersion = "v1" + tc.startingPod.Spec.ActiveDeadlineSeconds = &fifteen + + // Patch tries to change to 30. + tc.changedPod.Name = name + tc.changedPod.Namespace = namespace + tc.changedPod.UID = uid + tc.changedPod.ResourceVersion = "1" + tc.changedPod.APIVersion = "v1" + tc.changedPod.Spec.ActiveDeadlineSeconds = &thirty + + // Someone else already changed it to 30. + // This should be fine since it's not a "meaningful conflict". + // Previously this was detected as a meaningful conflict because int64(30) != float64(30). 
+ tc.updatePod.Name = name + tc.updatePod.Namespace = namespace + tc.updatePod.UID = uid + tc.updatePod.ResourceVersion = "2" + tc.updatePod.APIVersion = "v1" + tc.updatePod.Spec.ActiveDeadlineSeconds = &thirty + tc.updatePod.Spec.NodeName = "anywhere" + + tc.expectedPod.Name = name + tc.expectedPod.Namespace = namespace + tc.expectedPod.UID = uid + tc.expectedPod.ResourceVersion = "2" + tc.expectedPod.Spec.ActiveDeadlineSeconds = &thirty + tc.expectedPod.Spec.NodeName = "anywhere" + + tc.Run(t) +} + func TestPatchResourceWithVersionConflict(t *testing.T) { namespace := "bar" name := "foo" diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 2ae4a26e..863ad98b 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -20,6 +20,7 @@ import ( "bufio" "net" "net/http" + "regexp" "strconv" "strings" "time" @@ -58,6 +59,7 @@ var ( }, []string{"verb", "resource"}, ) + kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) ) // Register all metrics. @@ -109,9 +111,12 @@ func InstrumentRouteFunc(verb, resource string, routeFunc restful.RouteFunction) } func cleanUserAgent(ua string) string { + // We collapse all "web browser"-type user agents into one "browser" to reduce metric cardinality. if strings.HasPrefix(ua, "Mozilla/") { return "Browser" } + // If an old "kubectl.exe" has passed us its full path, we discard the path portion. + ua = kubectlExeRegexp.ReplaceAllString(ua, "$1") return ua } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go index 886bc1f6..4c0a8aa5 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go @@ -19,6 +19,8 @@ package metrics import "testing" func TestCleanUserAgent(t *testing.T) { + panicBuf := []byte{198, 73, 129, 133, 90, 216, 104, 29, 13, 134, 209, 233, 30, 0, 22} + for _, tc := range []struct { In string Out string @@ -31,6 +33,19 @@ func TestCleanUserAgent(t *testing.T) { In: "kubectl/v1.2.4", Out: "kubectl/v1.2.4", }, + { + In: `C:\Users\Kubernetes\kubectl.exe/v1.5.4`, + Out: "kubectl.exe/v1.5.4", + }, + { + In: `C:\Program Files\kubectl.exe/v1.5.4`, + Out: "kubectl.exe/v1.5.4", + }, + { + // This malicious input courtesy of enisoc. + In: string(panicBuf) + "kubectl.exe", + Out: "kubectl.exe", + }, } { if cleanUserAgent(tc.In) != tc.Out { t.Errorf("Failed to clean User-Agent: %s", tc.In) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go index 01fc5adc..9f8d6ab6 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -95,6 +95,8 @@ type Config struct { EnableContentionProfiling bool EnableMetrics bool + DisabledPostStartHooks sets.String + // Version will enable the /version endpoint if non-nil Version *version.Info // AuditWriter is the destination for audit logs. If nil, they will not be written. 
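The Config field added above, together with the exported "rbac/bootstrap-roles" hook name earlier in this diff and the AddPostStartHook guard later on, lets an embedding apiserver opt out of individual post-start hooks. A minimal sketch of how a caller might use it, assuming only what the hunks show (sets.String from k8s.io/apimachinery and the exported PostStartHookName constant); the surrounding wiring is invented for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// miniConfig stands in for the generic apiserver Config; only the field added
// in the hunk above is modeled here.
type miniConfig struct {
	DisabledPostStartHooks sets.String
}

func main() {
	c := miniConfig{DisabledPostStartHooks: sets.NewString()}
	// With the real packages this would be:
	//   cfg.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName)
	c.DisabledPostStartHooks.Insert("rbac/bootstrap-roles")
	fmt.Println("rbac bootstrap hook disabled:", c.DisabledPostStartHooks.Has("rbac/bootstrap-roles"))
}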
@@ -197,6 +199,7 @@ func NewConfig() *Config { RequestContextMapper: apirequest.NewRequestContextMapper(), BuildHandlerChainsFunc: DefaultBuildHandlerChain, LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), + DisabledPostStartHooks: sets.NewString(), HealthzChecks: []healthz.HealthzChecker{healthz.PingHealthz}, EnableIndex: true, EnableProfiling: true, @@ -395,8 +398,10 @@ func (c completedConfig) New() (*GenericAPIServer, error) { swaggerConfig: c.SwaggerConfig, openAPIConfig: c.OpenAPIConfig, - postStartHooks: map[string]postStartHookEntry{}, - healthzChecks: c.HealthzChecks, + postStartHooks: map[string]postStartHookEntry{}, + disabledPostStartHooks: c.DisabledPostStartHooks, + + healthzChecks: c.HealthzChecks, } s.HandlerContainer = mux.NewAPIContainer(http.NewServeMux(), c.Serializer) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 331e3a44..11a785e3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -138,10 +138,11 @@ type GenericAPIServer struct { // PostStartHooks are each called after the server has started listening, in a separate go func for each // with no guarantee of ordering between them. The map key is a name used for error reporting. - // It may kill the process with a panic if it wishes to by returning an error - postStartHookLock sync.Mutex - postStartHooks map[string]postStartHookEntry - postStartHooksCalled bool + // It may kill the process with a panic if it wishes to by returning an error. + postStartHookLock sync.Mutex + postStartHooks map[string]postStartHookEntry + postStartHooksCalled bool + disabledPostStartHooks sets.String // healthz checks healthzLock sync.Mutex diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/hooks.go index 16e1cb5c..89ba6288 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/hooks.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/hooks.go @@ -65,6 +65,9 @@ func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) if hook == nil { return nil } + if s.disabledPostStartHooks.Has(name) { + return nil + } s.postStartHookLock.Lock() defer s.postStartHookLock.Unlock() diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/helpers.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/helpers.go index 01f4ef47..82d086de 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/helpers.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/helpers.go @@ -276,10 +276,10 @@ const ( AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" ) -// Tries to add a toleration to annotations list. Returns true if something was updated -// false otherwise. -func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { - podTolerations := pod.Spec.Tolerations +// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. +// Returns true if something was updated, false otherwise. 
+func AddOrUpdateTolerationInPodSpec(spec *PodSpec, toleration *Toleration) (bool, error) { + podTolerations := spec.Tolerations var newTolerations []Toleration updated := false @@ -300,10 +300,16 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) newTolerations = append(newTolerations, *toleration) } - pod.Spec.Tolerations = newTolerations + spec.Tolerations = newTolerations return true, nil } +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) +} + // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , // if the two tolerations have same combination, regard as they match. // TODO: uniqueness check for tolerations in api validations. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/types.go index 7a2ad011..f47ea670 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/types.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/types.go @@ -233,6 +233,7 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. // +optional ConcurrencyPolicy ConcurrencyPolicy diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto index 4f51d616..605dafe3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto @@ -72,6 +72,7 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. // +optional optional string concurrencyPolicy = 3; diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go index 67f1c95e..1dfd4eee 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go @@ -94,6 +94,7 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Defaults to Allow. 
// +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go index c0b53b8e..dc4ff64c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.", "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go index f0f7338c..52c0311a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.6.1-beta.0+$Format:%h$" + gitVersion string = "v1.6.4-beta.1+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config.go index 2a2c03df..07e724f2 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config.go @@ -22,7 +22,7 @@ import ( "net" "net/http" "os" - "path" + "path/filepath" gruntime "runtime" "strings" "time" @@ -255,19 +255,51 @@ func SetKubernetesDefaults(config *Config) error { return nil } -// DefaultKubernetesUserAgent returns the default user agent that clients can use. +// adjustCommit returns sufficient significant figures of the commit's git hash. +func adjustCommit(c string) string { + if len(c) == 0 { + return "unknown" + } + if len(c) > 7 { + return c[:7] + } + return c +} + +// adjustVersion strips "alpha", "beta", etc. from version in form +// major.minor.patch-[alpha|beta|etc]. 
+func adjustVersion(v string) string { + if len(v) == 0 { + return "unknown" + } + seg := strings.SplitN(v, "-", 2) + return seg[0] +} + +// adjustCommand returns the last component of the +// OS-specific command path for use in User-Agent. +func adjustCommand(p string) string { + // Unlikely, but better than returning "". + if len(p) == 0 { + return "unknown" + } + return filepath.Base(p) +} + +// buildUserAgent builds a User-Agent string from given args. +func buildUserAgent(command, version, os, arch, commit string) string { + return fmt.Sprintf( + "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit) +} + +// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars. func DefaultKubernetesUserAgent() string { - commit := version.Get().GitCommit - if len(commit) > 7 { - commit = commit[:7] - } - if len(commit) == 0 { - commit = "unknown" - } - version := version.Get().GitVersion - seg := strings.SplitN(version, "-", 2) - version = seg[0] - return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) + return buildUserAgent( + adjustCommand(os.Args[0]), + adjustVersion(version.Get().GitVersion), + gruntime.GOOS, + gruntime.GOARCH, + adjustCommit(version.Get().GitCommit)) } // InClusterConfig returns a config object which uses the service account diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config_test.go index 5b9071d7..913493bd 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/config_test.go @@ -19,6 +19,7 @@ package rest import ( "io" "net/http" + "path/filepath" "reflect" "strings" "testing" @@ -32,6 +33,7 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/util/flowcontrol" + "github.com/stretchr/testify/assert" _ "k8s.io/client-go/pkg/api/install" ) @@ -100,6 +102,42 @@ func TestSetKubernetesDefaultsUserAgent(t *testing.T) { } } +func TestAdjustVersion(t *testing.T) { + assert := assert.New(t) + assert.Equal("1.2.3", adjustVersion("1.2.3-alpha4")) + assert.Equal("1.2.3", adjustVersion("1.2.3-alpha")) + assert.Equal("1.2.3", adjustVersion("1.2.3")) + assert.Equal("unknown", adjustVersion("")) +} + +func TestAdjustCommit(t *testing.T) { + assert := assert.New(t) + assert.Equal("1234567", adjustCommit("1234567890")) + assert.Equal("123456", adjustCommit("123456")) + assert.Equal("unknown", adjustCommit("")) +} + +func TestAdjustCommand(t *testing.T) { + assert := assert.New(t) + assert.Equal("beans", adjustCommand(filepath.Join("home", "bob", "Downloads", "beans"))) + assert.Equal("beans", adjustCommand(filepath.Join(".", "beans"))) + assert.Equal("beans", adjustCommand("beans")) + assert.Equal("unknown", adjustCommand("")) +} + +func TestBuildUserAgent(t *testing.T) { + assert.New(t).Equal( + "lynx/nicest (beos/itanium) kubernetes/baaaaaaaaad", + buildUserAgent( + "lynx", "nicest", + "beos", "itanium", "baaaaaaaaad")) +} + +// This function untestable since it doesn't accept arguments. 
+func TestDefaultKubernetesUserAgent(t *testing.T) { + assert.New(t).Contains(DefaultKubernetesUserAgent(), "kubernetes") +} + func TestRESTClientRequires(t *testing.T) { if _, err := RESTClientFor(&Config{Host: "127.0.0.1", ContentConfig: ContentConfig{NegotiatedSerializer: api.Codecs}}); err == nil { t.Errorf("unexpected non-error") diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index 0411e623..307d1216 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -482,13 +482,13 @@ func (config *inClusterClientConfig) Namespace() (string, bool, error) { // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, true, nil + return ns, false, nil } // Fall back to the namespace associated with the service account token, if available if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, true, nil + return ns, false, nil } } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 92c1a5a0..f3eb6a40 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" + "k8s.io/client-go/pkg/api" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -134,12 +135,26 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { return "", false, err } - ns, ok, err := mergedKubeConfig.Namespace() + ns, overridden, err := mergedKubeConfig.Namespace() // if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or // if in-cluster config is not possible, return immediately - if (err != nil && !IsEmptyConfig(err)) || ok || !config.icc.Possible() { + if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() { // return on any error except empty config - return ns, ok, err + return ns, overridden, err + } + + if len(ns) > 0 { + // if we got a non-default namespace from the kubeconfig, use it + if ns != api.NamespaceDefault { + return ns, false, nil + } + + // if we got a default namespace, determine whether it was explicit or implicit + if raw, err := mergedKubeConfig.RawConfig(); err == nil { + if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 { + return ns, false, nil + } + } } glog.V(4).Infof("Using in-cluster namespace") diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go b/vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go index 7b358849..7f441520 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go @@ -32,6 +32,7 @@ import ( const ( testImageRootUid = "gcr.io/google_containers/mounttest:0.8" 
testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.5" + volumePath = "/test-volume" ) var _ = framework.KubeDescribe("EmptyDir volumes", func() { @@ -47,6 +48,10 @@ var _ = framework.KubeDescribe("EmptyDir volumes", func() { doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory) }) + It("nonexistent volume subPath should have the correct mode and owner using FSGroup [Volume]", func() { + doTestSubPathFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory) + }) + It("files with FSGroup ownership should support (root,0644,tmpfs) [Volume]", func() { doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory) }) @@ -124,10 +129,9 @@ const ( func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - filePath = path.Join(volumePath, "test-file") - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(testImageRootUid, volumePath, source) + filePath = path.Join(volumePath, "test-file") + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(testImageRootUid, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -152,11 +156,40 @@ func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.Storage f.TestContainerOutput(msg, pod, 0, out) } +func doTestSubPathFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) { + var ( + subPath = "test-sub" + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) + ) + + pod.Spec.Containers[0].Args = []string{ + fmt.Sprintf("--fs_type=%v", volumePath), + fmt.Sprintf("--file_perm=%v", volumePath), + fmt.Sprintf("--file_owner=%v", volumePath), + } + + pod.Spec.Containers[0].VolumeMounts[0].SubPath = subPath + + fsGroup := int64(123) + pod.Spec.SecurityContext.FSGroup = &fsGroup + + msg := fmt.Sprintf("emptydir subpath on %v", formatMedium(medium)) + out := []string{ + "perms of file \"/test-volume\": -rwxrwxrwx", + "owner UID of \"/test-volume\": 0", + "owner GID of \"/test-volume\": 123", + } + if medium == v1.StorageMediumMemory { + out = append(out, "mount type of \"/test-volume\": tmpfs") + } + f.TestContainerOutput(msg, pod, 0, out) +} + func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(testImageRootUid, volumePath, source) + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(testImageRootUid, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -179,10 +212,9 @@ func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.Sto func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - filePath = path.Join(volumePath, "test-file") - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(image, volumePath, source) + filePath = path.Join(volumePath, "test-file") + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -207,9 +239,8 @@ func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMe func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(testImageRootUid, volumePath, source) + 
source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(testImageRootUid, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -229,10 +260,9 @@ func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMed func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - filePath = path.Join(volumePath, "test-file") - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(image, volumePath, source) + filePath = path.Join(volumePath, "test-file") + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -254,10 +284,9 @@ func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) { func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - filePath = path.Join(volumePath, "test-file") - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(image, volumePath, source) + filePath = path.Join(volumePath, "test-file") + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ @@ -279,10 +308,9 @@ func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) { func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) { var ( - volumePath = "/test-volume" - filePath = path.Join(volumePath, "test-file") - source = &v1.EmptyDirVolumeSource{Medium: medium} - pod = testPodWithVolume(image, volumePath, source) + filePath = path.Join(volumePath, "test-file") + source = &v1.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) ) pod.Spec.Containers[0].Args = []string{ diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/host_path.go b/vendor/k8s.io/kubernetes/test/e2e/common/host_path.go index e57a6e9a..c2f8e470 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/host_path.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/host_path.go @@ -35,12 +35,12 @@ var _ = framework.KubeDescribe("HostPath", func() { f := framework.NewDefaultFramework("hostpath") BeforeEach(func() { + // TODO permission denied cleanup failures //cleanup before running the test. _ = os.Remove("/tmp/test-file") }) It("should give a volume the correct mode [Conformance] [Volume]", func() { - volumePath := "/test-volume" source := &v1.HostPathVolumeSource{ Path: "/tmp", } @@ -57,7 +57,6 @@ var _ = framework.KubeDescribe("HostPath", func() { // This test requires mounting a folder into a container with write privileges. 
It("should support r/w [Volume]", func() { - volumePath := "/test-volume" filePath := path.Join(volumePath, "test-file") retryDuration := 180 source := &v1.HostPathVolumeSource{ @@ -82,7 +81,6 @@ var _ = framework.KubeDescribe("HostPath", func() { }) It("should support subPath [Volume]", func() { - volumePath := "/test-volume" subPath := "sub-path" fileName := "test-file" retryDuration := 180 @@ -94,6 +92,7 @@ var _ = framework.KubeDescribe("HostPath", func() { Path: "/tmp", } pod := testPodWithHostVol(volumePath, source) + // Write the file in the subPath from container 0 container := &pod.Spec.Containers[0] container.VolumeMounts[0].SubPath = subPath @@ -101,6 +100,92 @@ var _ = framework.KubeDescribe("HostPath", func() { fmt.Sprintf("--new_file_0644=%v", filePathInWriter), fmt.Sprintf("--file_mode=%v", filePathInWriter), } + + // Read it from outside the subPath from container 1 + pod.Spec.Containers[1].Args = []string{ + fmt.Sprintf("--file_content_in_loop=%v", filePathInReader), + fmt.Sprintf("--retry_time=%d", retryDuration), + } + + f.TestContainerOutput("hostPath subPath", pod, 1, []string{ + "content of file \"" + filePathInReader + "\": mount-tester new file", + }) + }) + + It("should support existing directory subPath [Volume]", func() { + framework.SkipUnlessSSHKeyPresent() + + subPath := "sub-path" + fileName := "test-file" + retryDuration := 180 + + filePathInWriter := path.Join(volumePath, fileName) + filePathInReader := path.Join(volumePath, subPath, fileName) + + source := &v1.HostPathVolumeSource{ + Path: "/tmp", + } + pod := testPodWithHostVol(volumePath, source) + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + pod.Spec.NodeName = nodeList.Items[0].Name + + // Create the subPath directory on the host + existing := path.Join(source.Path, subPath) + result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider) + framework.LogSSHResult(result) + framework.ExpectNoError(err) + if result.Code != 0 { + framework.Failf("mkdir returned non-zero") + } + + // Write the file in the subPath from container 0 + container := &pod.Spec.Containers[0] + container.VolumeMounts[0].SubPath = subPath + container.Args = []string{ + fmt.Sprintf("--new_file_0644=%v", filePathInWriter), + fmt.Sprintf("--file_mode=%v", filePathInWriter), + } + + // Read it from outside the subPath from container 1 + pod.Spec.Containers[1].Args = []string{ + fmt.Sprintf("--file_content_in_loop=%v", filePathInReader), + fmt.Sprintf("--retry_time=%d", retryDuration), + } + + f.TestContainerOutput("hostPath subPath", pod, 1, []string{ + "content of file \"" + filePathInReader + "\": mount-tester new file", + }) + }) + + // TODO consolidate common code of this test and above + It("should support existing single file subPath [Volume]", func() { + framework.SkipUnlessSSHKeyPresent() + + subPath := "sub-path-test-file" + retryDuration := 180 + + filePathInReader := path.Join(volumePath, subPath) + + source := &v1.HostPathVolumeSource{ + Path: "/tmp", + } + pod := testPodWithHostVol(volumePath, source) + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + pod.Spec.NodeName = nodeList.Items[0].Name + + // Create the subPath file on the host + existing := path.Join(source.Path, subPath) + result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider) + framework.LogSSHResult(result) + 
framework.ExpectNoError(err) + if result.Code != 0 { + framework.Failf("echo returned non-zero") + } + + // Mount the file to the subPath in container 0 + container := &pod.Spec.Containers[0] + container.VolumeMounts[0].SubPath = subPath + // Read it from outside the subPath from container 1 pod.Spec.Containers[1].Args = []string{ fmt.Sprintf("--file_content_in_loop=%v", filePathInReader), diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index ff126617..16003406 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -307,6 +307,12 @@ func SkipIfProviderIs(unsupportedProviders ...string) { } } +func SkipUnlessSSHKeyPresent() { + if _, err := GetSigner(TestContext.Provider); err != nil { + Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err) + } +} + func SkipUnlessProviderIs(supportedProviders ...string) { if !ProviderIs(supportedProviders...) { Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider) diff --git a/vendor/k8s.io/kubernetes/test/e2e/kubectl.go b/vendor/k8s.io/kubernetes/test/e2e/kubectl.go index ea2f70cb..9c6f6959 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/kubectl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/kubectl.go @@ -580,7 +580,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { It("should handle in-cluster config", func() { By("adding rbac permissions") - // grant the view permission widely to allow inspection of the `invalid` namespace. + // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace framework.BindClusterRole(f.ClientSet.Rbac(), "view", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) @@ -606,11 +606,70 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName) framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/") + // Build a kubeconfig file that will make use of the injected ca and token, + // but point at the DNS host and the default namespace + tmpDir, err := ioutil.TempDir("", "icc-override") + overrideKubeconfigName := "icc-override.kubeconfig" + framework.ExpectNoError(err) + defer func() { os.Remove(tmpDir) }() + framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(` +kind: Config +apiVersion: v1 +clusters: +- cluster: + api-version: v1 + server: https://kubernetes.default.svc:443 + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + name: kubeconfig-cluster +contexts: +- context: + cluster: kubeconfig-cluster + namespace: default + user: kubeconfig-user + name: kubeconfig-context +current-context: kubeconfig-context +users: +- name: kubeconfig-user + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +`), os.FileMode(0755))) + framework.Logf("copying override kubeconfig to the %s pod", simplePodName) + framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/") + + framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(` +kind: ConfigMap +apiVersion: v1 +metadata: + name: "configmap with namespace and invalid name" + namespace: configmap-namespace +`), os.FileMode(0755))) + framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, 
"invalid-configmap-without-namespace.yaml"), []byte(` +kind: ConfigMap +apiVersion: v1 +metadata: + name: "configmap without namespace and invalid name" +`), os.FileMode(0755))) + framework.Logf("copying configmap manifests to the %s pod", simplePodName) + framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + By("getting pods with in-cluster configs") - execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods") - if matched, err := regexp.MatchString("nginx +1/1 +Running", execOutput); err != nil || !matched { - framework.Failf("Unexpected kubectl exec output: ", execOutput) - } + execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=7 2>&1") + Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running")) + Expect(execOutput).To(ContainSubstring("Using in-cluster namespace")) + Expect(execOutput).To(ContainSubstring("Using in-cluster configuration")) + + By("creating an object containing a namespace with in-cluster config") + _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=7 2>&1") + Expect(err).To(ContainSubstring("Using in-cluster namespace")) + Expect(err).To(ContainSubstring("Using in-cluster configuration")) + Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort))) + + By("creating an object not containing a namespace with in-cluster config") + _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=7 2>&1") + Expect(err).To(ContainSubstring("Using in-cluster namespace")) + Expect(err).To(ContainSubstring("Using in-cluster configuration")) + Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name))) By("trying to use kubectl with invalid token") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") @@ -629,13 +688,17 @@ var _ = framework.KubeDescribe("Kubectl client", func() { Expect(err).To(ContainSubstring("GET http://invalid/api")) By("trying to use kubectl with invalid namespace") - output, _ := framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1") - Expect(output).To(ContainSubstring("No resources found")) - Expect(output).ToNot(ContainSubstring("Using in-cluster namespace")) - Expect(output).To(ContainSubstring("Using in-cluster configuration")) - if matched, _ := regexp.MatchString(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort), output); !matched { - framework.Failf("Unexpected kubectl exec output: ", output) - } + execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1") + Expect(execOutput).To(ContainSubstring("No resources found")) + Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace")) + Expect(execOutput).To(ContainSubstring("Using in-cluster configuration")) + Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort))) + + By("trying to use kubectl with kubeconfig") + execOutput = framework.RunHostCmdOrDie(ns, 
simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1") + Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace")) + Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration")) + Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods")) }) }) diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/BUILD b/vendor/k8s.io/kubernetes/test/e2e_node/BUILD index c12353fe..3a3f2fed 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/BUILD +++ b/vendor/k8s.io/kubernetes/test/e2e_node/BUILD @@ -78,6 +78,7 @@ go_test( "resource_usage_test.go", "restart_test.go", "runtime_conformance_test.go", + "security_context_test.go", "summary_test.go", "volume_manager_test.go", ], @@ -125,6 +126,7 @@ go_test( "//vendor:k8s.io/apimachinery/pkg/runtime/schema", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/util/intstr", + "//vendor:k8s.io/apimachinery/pkg/util/sets", "//vendor:k8s.io/apimachinery/pkg/util/uuid", "//vendor:k8s.io/apimachinery/pkg/watch", "//vendor:k8s.io/client-go/pkg/api", diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties b/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties new file mode 100644 index 00000000..be586461 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties @@ -0,0 +1,10 @@ +GCE_HOSTS= +GCE_IMAGE_CONFIG_PATH= +GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1 +GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud +GCE_ZONE=us-central1-f +GCE_PROJECT=k8s-jkns-ubuntu-node +CLEANUP=true +GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"' +KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TIMEOUT=1h diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties b/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties new file mode 100644 index 00000000..e34bffce --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties @@ -0,0 +1,12 @@ +GCE_HOSTS= +GCE_IMAGE_CONFIG_PATH= +GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1 +GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud +GCE_ZONE=us-central1-f +GCE_PROJECT=k8s-jkns-ubuntu-node-serial +CLEANUP=true +GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"' +TEST_ARGS='--feature-gates=DynamicKubeletConfig=true' +KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +PARALLELISM=1 +TIMEOUT=3h diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go b/vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go new file mode 100644 index 00000000..2b33f55e --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e_node + +import ( + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" +) + +var _ = framework.KubeDescribe("Security Context", func() { + f := framework.NewDefaultFramework("security-context-test") + var podClient *framework.PodClient + BeforeEach(func() { + podClient = f.PodClient() + }) + + Context("when creating a pod in the host PID namespace", func() { + makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + HostPID: hostPID, + Containers: []v1.Container{ + { + Image: image, + Name: podName, + Command: command, + }, + }, + }, + } + } + createAndWaitHostPidPod := func(podName string, hostPID bool) { + podClient.Create(makeHostPidPod(podName, + "gcr.io/google_containers/busybox:1.24", + []string{"sh", "-c", "pidof nginx || true"}, + hostPID, + )) + + podClient.WaitForSuccess(podName, framework.PodStartTimeout) + } + + nginxPid := "" + BeforeEach(func() { + nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID()) + podClient.CreateSync(makeHostPidPod(nginxPodName, + "gcr.io/google_containers/nginx-slim:0.7", + nil, + true, + )) + + output := f.ExecShellInContainer(nginxPodName, nginxPodName, + "cat /var/run/nginx.pid") + nginxPid = strings.TrimSpace(output) + }) + + It("should show its pid in the host PID namespace", func() { + busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) + createAndWaitHostPidPod(busyboxPodName, true) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + if err != nil { + framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + } + + pids := strings.TrimSpace(logs) + framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) + if pids == "" { + framework.Failf("nginx's pid should be seen by hostpid containers") + } + + pidSets := sets.NewString(strings.Split(pids, " ")...) + if !pidSets.Has(nginxPid) { + framework.Failf("nginx's pid should be seen by hostpid containers") + } + }) + + It("should not show its pid in the non-hostpid containers", func() { + busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID()) + createAndWaitHostPidPod(busyboxPodName, false) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + if err != nil { + framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + } + + pids := strings.TrimSpace(logs) + framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) + pidSets := sets.NewString(strings.Split(pids, " ")...) + if pidSets.Has(nginxPid) { + framework.Failf("nginx's pid should not be seen by non-hostpid containers") + } + }) + }) +}) diff --git a/vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go b/vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go new file mode 100644 index 00000000..8899c7df --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/integration/apiserver/patch_test.go @@ -0,0 +1,116 @@ +// +build integration,!no-etcd + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "fmt" + "sync" + "sync/atomic" + "testing" + + "github.com/pborman/uuid" + + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/endpoints/handlers" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/test/integration/framework" +) + +// Tests that the apiserver retries non-overlapping conflicts on patches +func TestPatchConflicts(t *testing.T) { + s, clientSet := setup(t) + defer s.Close() + + ns := framework.CreateTestingNamespace("status-code", s, t) + defer framework.DeleteTestingNamespace(ns, s, t) + + // Create the object we're going to conflict on + clientSet.Core().Secrets(ns.Name).Create(&v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + // Populate annotations so the strategic patch descends, compares, and notices the $patch directive + Annotations: map[string]string{"initial": "value"}, + }, + }) + client := clientSet.Core().RESTClient() + + successes := int32(0) + + // Run a lot of simultaneous patch operations to exercise internal API server retry of patch application. + // Internally, a patch API call retries up to MaxRetryWhenPatchConflicts times if the resource version of the object has changed. + // If the resource version of the object changed between attempts, that means another one of our patch requests succeeded. + // That means if we run 2*MaxRetryWhenPatchConflicts patch attempts, we should see at least MaxRetryWhenPatchConflicts succeed. + wg := sync.WaitGroup{} + for i := 0; i < (2 * handlers.MaxRetryWhenPatchConflicts); i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + annotationName := fmt.Sprintf("annotation-%d", i) + labelName := fmt.Sprintf("label-%d", i) + value := uuid.NewRandom().String() + + obj, err := client.Patch(types.StrategicMergePatchType). + Namespace(ns.Name). + Resource("secrets"). + Name("test"). + Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "annotations":{"$patch":"replace","%s":"%s"}}}`, labelName, value, annotationName, value))). + Do(). 
+ Get() + + if errors.IsConflict(err) { + t.Logf("tolerated conflict error patching %s: %v", "secrets", err) + return + } + if err != nil { + t.Errorf("error patching %s: %v", "secrets", err) + return + } + + accessor, err := meta.Accessor(obj) + if err != nil { + t.Errorf("error getting object from %s: %v", "secrets", err) + return + } + // make sure the label we wanted was effective + if accessor.GetLabels()[labelName] != value { + t.Errorf("patch of %s was ineffective, expected %s=%s, got labels %#v", "secrets", labelName, value, accessor.GetLabels()) + return + } + // make sure the patch directive didn't get lost, and that the entire annotation map was replaced + if !reflect.DeepEqual(accessor.GetAnnotations(), map[string]string{annotationName: value}) { + t.Errorf("patch of %s with $patch directive was ineffective, didn't replace entire annotations map: %#v", "secrets", accessor.GetAnnotations()) + } + + atomic.AddInt32(&successes, 1) + }(i) + } + wg.Wait() + + if successes < handlers.MaxRetryWhenPatchConflicts { + t.Errorf("Expected at least %d successful patches for %s, got %d", handlers.MaxRetryWhenPatchConflicts, "secrets", successes) + } else { + t.Logf("Got %d successful patches for %s", successes, "secrets") + } + +}
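
A minimal, standalone sketch of the strategic-merge-patch behaviour that TestPatchConflicts above relies on: a map carrying "$patch": "replace" is swapped out wholesale rather than merged key by key, while sibling maps (labels here) keep the default merge semantics. This is illustrative only and not part of the vendored patch; it assumes the vendored strategicpatch package and v1.Secret type, and the label/annotation names are made up for the example.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Object roughly as created by the test: one pre-existing annotation.
	original := []byte(`{"metadata":{"name":"test","annotations":{"initial":"value"}}}`)

	// Patch shaped like the one each goroutine sends: labels use default merge
	// semantics, while "$patch":"replace" replaces the whole annotations map.
	patch := []byte(`{"metadata":{"labels":{"label-0":"x"},"annotations":{"$patch":"replace","annotation-0":"x"}}}`)

	// v1.Secret supplies the struct schema that drives the merge-vs-replace decisions.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, v1.Secret{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
	// Expected: labels now contain "label-0", and annotations are exactly
	// {"annotation-0":"x"} -- the same outcome the reflect.DeepEqual check asserts.
}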