Upgrade k8s to v1.6.4

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-05-22 16:19:39 +02:00
parent d09f928fac
commit 6395319aef
140 changed files with 3568 additions and 1301 deletions


@@ -1,5 +1,5 @@
 {
-"memo": "c0b6d8274a7286341387dd0a060d5f1c171bf8658cc76602c6e026880a951a08",
+"memo": "88a7bc044db73c7ab2adc009d3780db88e39e36cbb96e0f1f4b0636929481543",
 "projects": [
 {
 "name": "cloud.google.com/go",
@@ -176,7 +176,8 @@
 "revision": "48702e0da86bd25e76cfef347e2adeb434a0d0a6",
 "packages": [
 "daemon",
-"dbus"
+"dbus",
+"util"
 ]
 },
 {
@@ -184,6 +185,7 @@
 "version": "v3",
 "revision": "3ac0863d7acf3bc44daf49afef8919af12f704ef",
 "packages": [
+"dlopen",
 "health",
 "httputil",
 "timeutil"
@@ -523,10 +525,14 @@
 "revision": "b263a43430ac6996a4302b891688544225197294",
 "packages": [
 "libcontainer/apparmor",
+"libcontainer/cgroups",
+"libcontainer/cgroups/fs",
+"libcontainer/cgroups/systemd",
 "libcontainer/configs",
 "libcontainer/devices",
 "libcontainer/system",
-"libcontainer/user"
+"libcontainer/user",
+"libcontainer/utils"
 ]
 },
 {
@@ -840,8 +846,8 @@
 },
 {
 "name": "k8s.io/kubernetes",
-"version": "v1.6.1",
-"revision": "b0b7a323cc5a4a2019b2e9520c21c7830b7f708e",
+"version": "v1.6.4",
+"revision": "d6f433224538d4f9ca2f7ae19b252e6fcb66a3ae",
 "packages": [
 "pkg/api",
 "pkg/api/install",


@@ -9,9 +9,6 @@
 "github.com/containers/storage": {
 "branch": "master"
 },
-"github.com/opencontainers/image-spec": {
-"version": "v1.0.0-rc5"
-},
 "github.com/docker/distribution": {
 "branch": "master"
 },
@@ -21,6 +18,9 @@
 "github.com/godbus/dbus": {
 "version": "^4.0.0"
 },
+"github.com/opencontainers/image-spec": {
+"version": "v1.0.0-rc5"
+},
 "github.com/opencontainers/runc": {
 "branch": "master"
 },
@@ -36,11 +36,11 @@
 "google.golang.org/grpc": {
 "version": "v1.0.1-GA"
 },
-"k8s.io/kubernetes": {
-"version": "v1.6.1"
-},
 "k8s.io/apimachinery": {
 "revision": "ae33df8bd0294deb6f1853cc107816dd181e0146"
+},
+"k8s.io/kubernetes": {
+"version": "~v1.6.1"
 }
 }
 }

vendor/k8s.io/kubernetes/WORKSPACE (generated, vendored; 16 changed lines)

@@ -24,33 +24,39 @@ go_repositories()
 debs = (
 (
 "busybox_deb",
-"7465567f5e5255188b1d004d7081066cd79f77a5c18a5d418d27966d698e0bef",
-"http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b2_amd64.deb",
+"5f81f140777454e71b9e5bfdce9c89993de5ddf4a7295ea1cfda364f8f630947",
+"http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b3_amd64.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/busybox-static_1.22.0-19+b3_amd64.deb",
 ),
 (
 "libc_deb",
-"6bbd506b171a9f29b09fde77e2749c0aa0c1439058df9d1a6408d464069b7dd6",
-"http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-9_amd64.deb",
+"372aac4a9ce9dbb26a08de0b9c41b0500ba019430295d29f39566483f5f32732",
+"http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-10_amd64.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/libc6_2.24-10_amd64.deb",
 ),
 (
 "iptables_deb",
 "7747388a97ba71fede302d70361c81d486770a2024185514c18b5d8eab6aaf4e",
 "http://ftp.us.debian.org/debian/pool/main/i/iptables/iptables_1.4.21-2+b1_amd64.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/iptables_1.4.21-2+b1_amd64.deb",
 ),
 (
 "libnetlink_deb",
 "5d486022cd9e047e9afbb1617cf4519c0decfc3d2c1fad7e7fe5604943dbbf37",
 "http://ftp.us.debian.org/debian/pool/main/libn/libnfnetlink/libnfnetlink0_1.0.1-3_amd64.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/libnfnetlink0_1.0.1-3_amd64.deb",
 ),
 (
 "libxtables_deb",
 "6783f316af4cbf3ada8b9a2b7bb5f53a87c0c2575c1903ce371fdbd45d3626c6",
 "http://ftp.us.debian.org/debian/pool/main/i/iptables/libxtables10_1.4.21-2+b1_amd64.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/libxtables10_1.4.21-2+b1_amd64.deb",
 ),
 (
 "iproute2_deb",
 "3ce9cb1d03a2a1359cbdd4f863b15d0c906096bf713e8eb688149da2f4e350bc",
 "http://ftp.us.debian.org/debian/pool/main/i/iproute2/iproute_3.16.0-2_all.deb",
+"https://storage.googleapis.com/kubernetes-release/debs/iproute_3.16.0-2_all.deb",
 ),
 )
@@ -58,7 +64,7 @@ debs = (
 name = name,
 sha256 = sha256,
 url = url,
-) for name, sha256, url in debs]
+) for name, sha256, origin, url in debs]
 http_file(
 name = "kubernetes_cni",


@@ -2,7 +2,7 @@
 "swagger": "2.0",
 "info": {
 "title": "Kubernetes",
-"version": "v1.6.1"
+"version": "v1.6.4"
 },
 "paths": {
 "/api/": {
@@ -43262,7 +43262,7 @@
 ],
 "properties": {
 "concurrencyPolicy": {
-"description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
+"description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
 "type": "string"
 },
 "failedJobsHistoryLimit": {


@@ -2,6 +2,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
 name: kube-dns
+namespace: kube-system
 labels:
 kubernetes.io/cluster-service: "true"
 addonmanager.kubernetes.io/mode: Reconcile


@@ -79,9 +79,6 @@ data:
 type counter
 name logging_line_count
 desc Total number of lines generated by application containers
-<labels>
-tag ${tag}
-</labels>
 </metric>
 </store>
 <store>
@@ -342,9 +339,8 @@ data:
 <metric>
 type counter
 name logging_entry_count
-desc Total number of log entries generated by either an application container or a system component
+desc Total number of log entries generated by either application containers or system components
 <labels>
-tag ${tag}
 component container
 </labels>
 </metric>
@@ -376,9 +372,8 @@ data:
 <metric>
 type counter
 name logging_entry_count
-desc Total number of log entries generated by either an application container or a system component
+desc Total number of log entries generated by either application containers or system components
 <labels>
-tag ${tag}
 component system
 </labels>
 </metric>


@@ -1,6 +1,6 @@
 #!/bin/bash
-# Copyright 2015 The Kubernetes Authors.
+# Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -461,8 +461,8 @@ function find-release-tars() {
 # This tarball is used by GCI, Ubuntu Trusty, and Container Linux.
 KUBE_MANIFESTS_TAR=
-if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" ]] || \
-[[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" ]] ; then
+if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \
+[[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then
 KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz)
 fi
 }
@@ -598,7 +598,9 @@ function build-kube-env {
 local salt_tar_url=$SALT_TAR_URL
 local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
 if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
-[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] ; then
+[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \
+[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
+[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
 # TODO: Support fallback .tar.gz settings on Container Linux
 server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
 salt_tar_url=$(split_csv "${SALT_TAR_URL}")
@@ -682,8 +684,8 @@ EOF
 TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
 EOF
 fi
-if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") ]] || \
-[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") ]] ; then
+if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
+[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
 cat >>$file <<EOF
 KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
 KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})


@@ -999,6 +999,9 @@ function start-kube-apiserver {
 params+=" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}"
 params+=" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}"
 fi
+if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]]; then
+params+=" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}"
+fi
 params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
 if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
 params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"


@@ -112,6 +112,9 @@ write_files:
 [Unit]
 Description=Kubernetes
+[Install]
+WantedBy=multi-user.target
 runcmd:
 - systemctl daemon-reload
 - systemctl enable kube-master-installation.service
@@ -120,4 +123,5 @@ runcmd:
 - systemctl enable kubelet-monitor.service
 - systemctl enable kube-logrotate.timer
 - systemctl enable kube-logrotate.service
+- systemctl enable kubernetes.target
 - systemctl start kubernetes.target


@@ -112,6 +112,9 @@ write_files:
 [Unit]
 Description=Kubernetes
+[Install]
+WantedBy=multi-user.target
 runcmd:
 - systemctl daemon-reload
 - systemctl enable kube-node-installation.service
@@ -120,4 +123,5 @@ runcmd:
 - systemctl enable kubelet-monitor.service
 - systemctl enable kube-logrotate.timer
 - systemctl enable kube-logrotate.service
+- systemctl enable kubernetes.target
 - systemctl start kubernetes.target


@@ -1,6 +1,6 @@
 #!/bin/bash
-# Copyright 2014 The Kubernetes Authors.
+# Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,14 +23,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
 source "${KUBE_ROOT}/cluster/common.sh"
 source "${KUBE_ROOT}/cluster/lib/util.sh"
-if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
+if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
 source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
 else
 echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
 exit 1
 fi
-if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
+if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
 source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
 else
 echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
@@ -592,8 +592,8 @@ function kube-up() {
 parse-master-env
 create-nodes
 elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
-if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
-echo "Master replication supported only for gci and debian"
+if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
+echo "Master replication supported only for gci, debian, and ubuntu"
 return 1
 fi
 create-loadbalancer


@@ -19,6 +19,7 @@ CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}"
 NETWORK=${KUBE_GKE_NETWORK:-e2e}
 NODE_TAG="k8s-${CLUSTER_NAME}-node"
 IMAGE_TYPE="${KUBE_GKE_IMAGE_TYPE:-container_vm}"
+ENABLE_KUBERNETES_ALPHA="${KUBE_GKE_ENABLE_KUBERNETES_ALPHA:-}"
 KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}


@@ -135,6 +135,7 @@ function validate-cluster {
 # HEAPSTER_MACHINE_TYPE (optional)
 # CLUSTER_IP_RANGE (optional)
 # GKE_CREATE_FLAGS (optional, space delineated)
+# ENABLE_KUBERNETES_ALPHA (optional)
 function kube-up() {
 echo "... in gke:kube-up()" >&2
 detect-project >&2
@@ -184,6 +185,10 @@ function kube-up() {
 "--machine-type=${MACHINE_TYPE}"
 )
+if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then
+create_args+=("--enable-kubernetes-alpha")
+fi
 if [[ ! -z "${ADDITIONAL_ZONES:-}" ]]; then
 create_args+=("--additional-zones=${ADDITIONAL_ZONES}")
 fi


@@ -25,12 +25,12 @@
 "containers": [
 {
 "name": "cluster-autoscaler",
-"image": "gcr.io/google_containers/cluster-autoscaler:v0.5.1",
+"image": "gcr.io/google_containers/cluster-autoscaler:v0.5.4",
 "command": [
 "./run.sh",
 "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
 "--v=4",
-"--stderrthreshold=info",
+"--logtostderr=true",
 "--write-status-configmap=true",
 "{{params}}"
 ],


@@ -1,18 +1,18 @@
 apiVersion: v1
 kind: Pod
 metadata:
-name: l7-lb-controller-v0.9.2
+name: l7-lb-controller-v0.9.3
 namespace: kube-system
 labels:
 k8s-app: glbc
-version: v0.9.2
+version: v0.9.3
 kubernetes.io/cluster-service: "true"
 kubernetes.io/name: "GLBC"
 spec:
 terminationGracePeriodSeconds: 600
 hostNetwork: true
 containers:
-- image: gcr.io/google_containers/glbc:0.9.2
+- image: gcr.io/google_containers/glbc:0.9.3
 livenessProbe:
 httpGet:
 path: /healthz
@@ -43,7 +43,7 @@ spec:
 # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
 - sh
 - -c
-- '/glbc --verbose=true --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
+- '/glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
 volumes:
 - hostPath:
 path: /etc/gce.conf


@@ -31,9 +31,11 @@ go_library(
 "//pkg/kubeapiserver:go_default_library",
 "//pkg/kubeapiserver/admission:go_default_library",
 "//pkg/kubeapiserver/authenticator:go_default_library",
+"//pkg/kubeapiserver/authorizer/modes:go_default_library",
 "//pkg/master:go_default_library",
 "//pkg/master/tunneler:go_default_library",
 "//pkg/registry/cachesize:go_default_library",
+"//pkg/registry/rbac/rest:go_default_library",
 "//pkg/version:go_default_library",
 "//plugin/pkg/admission/admit:go_default_library",
 "//plugin/pkg/admission/alwayspullimages:go_default_library",


@@ -63,9 +63,11 @@ import (
 "k8s.io/kubernetes/pkg/kubeapiserver"
 kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
 kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
+"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
 "k8s.io/kubernetes/pkg/master"
 "k8s.io/kubernetes/pkg/master/tunneler"
 "k8s.io/kubernetes/pkg/registry/cachesize"
+rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
 "k8s.io/kubernetes/pkg/version"
 "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
 )
@@ -324,6 +326,9 @@ func BuildMasterConfig(s *options.ServerRunOptions) (*master.Config, informers.S
 if err != nil {
 return nil, nil, fmt.Errorf("invalid authentication config: %v", err)
 }
+if !sets.NewString(s.Authorization.Modes()...).Has(modes.ModeRBAC) {
+genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName)
+}
 authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)
 apiAuthorizer, err := authorizationConfig.New()


@@ -26,14 +26,12 @@ import (
 const (
 DefaultServiceDNSDomain = "cluster.local"
 DefaultServicesSubnet = "10.96.0.0/12"
-DefaultKubernetesVersion = "latest-1.6"
-// This is only for clusters without internet, were the latest stable version can't be determined
-DefaultKubernetesFallbackVersion = "v1.6.0"
-DefaultAPIBindPort = 6443
-DefaultDiscoveryBindPort = 9898
-DefaultAuthorizationMode = "RBAC"
-DefaultCACertPath = "/etc/kubernetes/pki/ca.crt"
-DefaultCertificatesDir = "/etc/kubernetes/pki"
+DefaultKubernetesVersion = "stable-1.6"
+DefaultAPIBindPort = 6443
+DefaultDiscoveryBindPort = 9898
+DefaultAuthorizationMode = "RBAC"
+DefaultCACertPath = "/etc/kubernetes/pki/ca.crt"
+DefaultCertificatesDir = "/etc/kubernetes/pki"
 )
 func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -46,7 +44,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
 func SetDefaults_MasterConfiguration(obj *MasterConfiguration) {
 if obj.KubernetesVersion == "" {
-obj.KubernetesVersion = DefaultKubernetesFallbackVersion
+obj.KubernetesVersion = DefaultKubernetesVersion
 }
 if obj.API.BindPort == 0 {


@@ -22,7 +22,6 @@ import (
 netutil "k8s.io/apimachinery/pkg/util/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
 kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
 tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token"
@@ -45,11 +44,7 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error {
 // Validate version argument
 ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
 if err != nil {
-if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
-return err
-} else {
-ver = kubeadmapiext.DefaultKubernetesFallbackVersion
-}
+return err
 }
 cfg.KubernetesVersion = ver


@@ -105,16 +105,16 @@ func (r *Reset) Run(out io.Writer) error {
 }
 dockerCheck := preflight.ServiceCheck{Service: "docker", CheckIfActive: true}
-if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 {
+if _, errors := dockerCheck.Check(); len(errors) == 0 {
 fmt.Println("[reset] Removing kubernetes-managed containers")
-if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil {
+if err := exec.Command("sh", "-c", "docker ps -a --filter name=k8s_ -q | xargs -r docker rm --force --volumes").Run(); err != nil {
 fmt.Println("[reset] Failed to stop the running containers")
 }
 } else {
 fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers")
 }
-dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}
+dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim"}
 // Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
 // provided external etcd endpoints. In that case, it is his own responsibility to reset etcd


@@ -61,7 +61,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error
 // The master node is tainted and labelled accordingly
 n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
-n.Spec.Taints = []v1.Taint{{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}}
+n.Spec.Taints = append(n.Spec.Taints, v1.Taint{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"})
 newData, err := json.Marshal(n)
 if err != nil {


@@ -61,9 +61,11 @@ metadata:
 provisioner: kubernetes.io/vsphere-volume
 parameters:
 diskformat: eagerzeroedthick
+fstype: ext3
 ```
 * `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`.
+* `fstype`: fstype that are supported by kubernetes. Default: `"ext4"`.
 #### Portworx Volume


@@ -5,6 +5,8 @@
 - [Volumes](#volumes)
 - [Persistent Volumes](#persistent-volumes)
 - [Storage Class](#storage-class)
+- [Virtual SAN policy support inside Kubernetes](#virtual-san-policy-support-inside-kubernetes)
+- [Stateful Set](#stateful-set)
 ## Prerequisites
@@ -212,6 +214,7 @@
 provisioner: kubernetes.io/vsphere-volume
 parameters:
 diskformat: zeroedthick
+fstype: ext3
 ```
 [Download example](vsphere-volume-sc-fast.yaml?raw=true)
@@ -243,11 +246,12 @@
 ``` bash
 $ kubectl describe storageclass fast
 Name: fast
-Annotations: <none>
-Provisioner: kubernetes.io/vsphere-volume
-Parameters: diskformat=zeroedthick
-No events.
+IsDefaultClass: No
+Annotations: <none>
+Provisioner: kubernetes.io/vsphere-volume
+Parameters: diskformat=zeroedthick,fstype=ext3
+No events.
 ```
 2. Create Persistent Volume Claim.
@@ -281,14 +285,19 @@
 ``` bash
 $ kubectl describe pvc pvcsc001
 Name: pvcsc001
 Namespace: default
-Status: Bound
-Volume: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
-Labels: <none>
-Capacity: 2Gi
-Access Modes: RWO
-No events.
+StorageClass: fast
+Status: Bound
+Volume: pvc-83295256-f8e0-11e6-8263-005056b2349c
+Labels: <none>
+Capacity: 2Gi
+Access Modes: RWO
+Events:
+FirstSeen LastSeen Count From SubObjectPath Type Reason Message
+--------- -------- ----- ---- ------------- -------- ------ -------
+1m 1m 1 persistentvolume-controller Normal ProvisioningSucceeded Successfully provisioned volume pvc-83295256-f8e0-11e6-8263-005056b2349c using kubernetes.io/vsphere-volume
 ```
 Persistent Volume is automatically created and is bounded to this pvc.
@@ -296,19 +305,20 @@
 Verifying persistent volume claim is created:
 ``` bash
-$ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
-Name: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+$ kubectl describe pv pvc-83295256-f8e0-11e6-8263-005056b2349c
+Name: pvc-83295256-f8e0-11e6-8263-005056b2349c
 Labels: <none>
-Status: Bound
-Claim: default/pvcsc001
-Reclaim Policy: Delete
-Access Modes: RWO
-Capacity: 2Gi
+StorageClass: fast
+Status: Bound
+Claim: default/pvcsc001
+Reclaim Policy: Delete
+Access Modes: RWO
+Capacity: 2Gi
 Message:
 Source:
 Type: vSphereVolume (a Persistent Disk resource in vSphere)
-VolumePath: [datastore1] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk
-FSType: ext4
+VolumePath: [datastore1] kubevols/kubernetes-dynamic-pvc-83295256-f8e0-11e6-8263-005056b2349c.vmdk
+FSType: ext3
 No events.
 ```
@@ -353,6 +363,257 @@
 pvpod 1/1 Running 0 48m
 ```
+### Virtual SAN policy support inside Kubernetes
+Vsphere Infrastructure(VI) Admins will have the ability to specify custom Virtual SAN Storage Capabilities during dynamic volume provisioning. You can now define storage requirements, such as performance and availability, in the form of storage capabilities during dynamic volume provisioning. The storage capability requirements are converted into a Virtual SAN policy which are then pushed down to the Virtual SAN layer when a persistent volume (virtual disk) is being created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements.
+The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN. The user can specify these storage capabilities as part of storage class defintion based on his application needs.
+The policy settings can be one or more of the following:
+* *hostFailuresToTolerate*: represents NumberOfFailuresToTolerate
+* *diskStripes*: represents NumberofDiskStripesPerObject
+* *objectSpaceReservation*: represents ObjectSpaceReservation
+* *cacheReservation*: represents FlashReadCacheReservation
+* *iopsLimit*: represents IOPSLimitForObject
+* *forceProvisioning*: represents if volume must be Force Provisioned
+__Note: Here you don't need to create persistent volume it is created for you.__
+1. Create Storage Class.
+Example 1:
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+diskformat: zeroedthick
+hostFailuresToTolerate: "2"
+cachereservation: "20"
+```
+[Download example](vsphere-volume-sc-vsancapabilities.yaml?raw=true)
+Here a persistent volume will be created with the Virtual SAN capabilities - hostFailuresToTolerate to 2 and cachereservation is 20% read cache reserved for storage object. Also the persistent volume will be *zeroedthick* disk.
+The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN and can be configured on the virtual disk.
+You can also specify the datastore in the Storageclass as shown in example 2. The volume will be created on the datastore specified in the storage class.
+This field is optional. If not specified as shown in example 1, the volume will be created on the datastore specified in the vsphere config file used to initialize the vSphere Cloud Provider.
+Example 2:
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+diskformat: zeroedthick
+datastore: VSANDatastore
+hostFailuresToTolerate: "2"
+cachereservation: "20"
+```
+[Download example](vsphere-volume-sc-vsancapabilities-with-datastore.yaml?raw=true)
+__Note: If you do not apply a storage policy during dynamic provisioning on a VSAN datastore, it will use a default Virtual SAN policy.__
+Creating the storageclass:
+``` bash
+$ kubectl create -f examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml
+```
+Verifying storage class is created:
+``` bash
+$ kubectl describe storageclass fast
+Name: fast
+Annotations: <none>
+Provisioner: kubernetes.io/vsphere-volume
+Parameters: diskformat=zeroedthick, hostFailuresToTolerate="2", cachereservation="20"
+No events.
+```
+2. Create Persistent Volume Claim.
+See example:
+```yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+name: pvcsc-vsan
+annotations:
+volume.beta.kubernetes.io/storage-class: fast
+spec:
+accessModes:
+- ReadWriteOnce
+resources:
+requests:
+storage: 2Gi
+```
+[Download example](vsphere-volume-pvcsc.yaml?raw=true)
+Creating the persistent volume claim:
+``` bash
+$ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcsc.yaml
+```
+Verifying persistent volume claim is created:
+``` bash
+$ kubectl describe pvc pvcsc-vsan
+Name: pvcsc-vsan
+Namespace: default
+Status: Bound
+Volume: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+Labels: <none>
+Capacity: 2Gi
+Access Modes: RWO
+No events.
+```
+Persistent Volume is automatically created and is bounded to this pvc.
+Verifying persistent volume claim is created:
+``` bash
+$ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+Name: pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+Labels: <none>
+Status: Bound
+Claim: default/pvcsc-vsan
+Reclaim Policy: Delete
+Access Modes: RWO
+Capacity: 2Gi
+Message:
+Source:
+Type: vSphereVolume (a Persistent Disk resource in vSphere)
+VolumePath: [VSANDatastore] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk
+FSType: ext4
+No events.
+```
+__Note: VMDK is created inside ```kubevols``` folder in datastore which is mentioned in 'vsphere' cloudprovider configuration.
+The cloudprovider config is created during setup of Kubernetes cluster on vSphere.__
+3. Create Pod which uses Persistent Volume Claim with storage class.
+See example:
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+name: pvpod
+spec:
+containers:
+- name: test-container
+image: gcr.io/google_containers/test-webserver
+volumeMounts:
+- name: test-volume
+mountPath: /test
+volumes:
+- name: test-volume
+persistentVolumeClaim:
+claimName: pvcsc-vsan
+```
+[Download example](vsphere-volume-pvcscpod.yaml?raw=true)
+Creating the pod:
+``` bash
+$ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml
+```
+Verifying pod is created:
+``` bash
+$ kubectl get pod pvpod
+NAME READY STATUS RESTARTS AGE
+pvpod 1/1 Running 0 48m
+```
+### Stateful Set
+vSphere volumes can be consumed by Stateful Sets.
+1. Create a storage class that will be used by the ```volumeClaimTemplates``` of a Stateful Set.
+See example:
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+name: thin-disk
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+diskformat: thin
+```
+[Download example](simple-storageclass.yaml)
+2. Create a Stateful set that consumes storage from the Storage Class created.
+See example:
+```yaml
+---
+apiVersion: v1
+kind: Service
+metadata:
+name: nginx
+labels:
+app: nginx
+spec:
+ports:
+- port: 80
+name: web
+clusterIP: None
+selector:
+app: nginx
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+name: web
+spec:
+serviceName: "nginx"
+replicas: 14
+template:
+metadata:
+labels:
+app: nginx
+spec:
+containers:
+- name: nginx
+image: gcr.io/google_containers/nginx-slim:0.8
+ports:
+- containerPort: 80
+name: web
+volumeMounts:
+- name: www
+mountPath: /usr/share/nginx/html
+volumeClaimTemplates:
+- metadata:
+name: www
+annotations:
+volume.beta.kubernetes.io/storage-class: thin-disk
+spec:
+accessModes: [ "ReadWriteOnce" ]
+resources:
+requests:
+storage: 1Gi
+```
+This will create Persistent Volume Claims for each replica and provision a volume for each claim if an existing volume could be bound to the claim.
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/vsphere/README.md?pixel)]()


@@ -4,4 +4,5 @@ metadata:
 name: fast
 provisioner: kubernetes.io/vsphere-volume
 parameters:
 diskformat: zeroedthick
+fstype: ext3


@@ -0,0 +1,10 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+diskformat: zeroedthick
+datastore: vsanDatastore
+hostFailuresToTolerate: "2"
+cachereservation: "20"


@@ -0,0 +1,9 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+diskformat: zeroedthick
+hostFailuresToTolerate: "2"
+cachereservation: "20"


@@ -2,7 +2,7 @@
 "swagger": "2.0",
 "info": {
 "title": "Generic API Server",
-"version": "v1.6.1"
+"version": "v1.6.4"
 },
 "paths": {
 "/api/": {


@@ -169,7 +169,8 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
 },
 func(client kubeclientset.Interface, obj pkgruntime.Object) error {
 configmap := obj.(*apiv1.ConfigMap)
-err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 return err
 })
@@ -202,13 +203,13 @@ func (configmapcontroller *ConfigMapController) hasFinalizerFunc(obj pkgruntime.
 return false
 }
-// removeFinalizerFunc removes the finalizer from the given objects ObjectMeta. Assumes that the given object is a configmap.
-func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+// removeFinalizerFunc removes the given finalizers from the given objects ObjectMeta. Assumes that the given object is a configmap.
+func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
 configmap := obj.(*apiv1.ConfigMap)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range configmap.ObjectMeta.Finalizers {
-if string(configmap.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, configmap.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, configmap.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -221,7 +222,7 @@ func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgrunti
 configmap.ObjectMeta.Finalizers = newFinalizers
 configmap, err := configmapcontroller.federatedApiClient.Core().ConfigMaps(configmap.Namespace).Update(configmap)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from configmap %s: %v", finalizer, configmap.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from configmap %s: %v", finalizers, configmap.Name, err)
 }
 return configmap, nil
 }
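The rewritten removeFinalizerFunc above (and the matching rewrites in the controllers that follow) now accepts a list of finalizers and filters each entry through deletionhelper.ContainsString. The helper itself is not part of this diff; a minimal sketch of what it is assumed to look like, based only on how it is called here:

```go
package deletionhelper

// ContainsString reports whether value occurs in slice. This is an assumed
// reconstruction of the helper referenced by the federated controllers in
// this commit; only its call sites appear in the diff.
func ContainsString(slice []string, value string) bool {
	for _, s := range slice {
		if s == value {
			return true
		}
	}
	return false
}
```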


@@ -182,7 +182,8 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
 func(client kubeclientset.Interface, obj pkgruntime.Object) error {
 daemonset := obj.(*extensionsv1.DaemonSet)
 glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
-err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 if err != nil {
 glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
 } else {
@@ -220,14 +221,14 @@ func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime.
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a daemonset.
-func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
 daemonset := obj.(*extensionsv1.DaemonSet)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range daemonset.ObjectMeta.Finalizers {
-if string(daemonset.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, daemonset.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, daemonset.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -240,7 +241,7 @@ func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgrunti
 daemonset.ObjectMeta.Finalizers = newFinalizers
 daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from daemonset %s: %v", finalizer, daemonset.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from daemonset %s: %v", finalizers, daemonset.Name, err)
 }
 return daemonset, nil
 }
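Each federated controller in this commit also switches its per-cluster delete call to pass OrphanDependents explicitly. The local orphanDependents variable exists because the OrphanDependents field of metav1.DeleteOptions is a *bool, so the literal false cannot be passed directly; setting it to false asks the member cluster's API server to delete dependent objects rather than orphan them. A small self-contained sketch of the pattern, with the interface and names below invented for illustration rather than taken from this diff:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// resourceDeleter is a hypothetical, minimal slice of the client surface this
// pattern needs; in the real controllers the client is the generated clientset.
type resourceDeleter interface {
	Delete(name string, options *metav1.DeleteOptions) error
}

// deleteWithDependents mirrors the deletion pattern used by the federated
// controllers: OrphanDependents=false so dependents are deleted, not orphaned.
func deleteWithDependents(client resourceDeleter, name string) error {
	orphanDependents := false // a named variable so its address can be taken
	return client.Delete(name, &metav1.DeleteOptions{
		OrphanDependents: &orphanDependents,
	})
}
```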


@@ -201,7 +201,8 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen
 },
 func(client kubeclientset.Interface, obj runtime.Object) error {
 rs := obj.(*extensionsv1.Deployment)
-err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 return err
 })
@@ -234,14 +235,14 @@ func (fdc *DeploymentController) hasFinalizerFunc(obj runtime.Object, finalizer
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a deployment.
-func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
 deployment := obj.(*extensionsv1.Deployment)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range deployment.ObjectMeta.Finalizers {
-if string(deployment.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, deployment.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, deployment.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -254,7 +255,7 @@ func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finaliz
 deployment.ObjectMeta.Finalizers = newFinalizers
 deployment, err := fdc.fedClient.Extensions().Deployments(deployment.Namespace).Update(deployment)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from deployment %s: %v", finalizer, deployment.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from deployment %s: %v", finalizers, deployment.Name, err)
 }
 return deployment, nil
 }


@@ -255,7 +255,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
 func(client kubeclientset.Interface, obj pkgruntime.Object) error {
 ingress := obj.(*extensionsv1beta1.Ingress)
 glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
-err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 return err
 })
@@ -316,14 +317,14 @@ func (ic *IngressController) hasFinalizerFunc(obj pkgruntime.Object, finalizer s
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a ingress.
-func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
 ingress := obj.(*extensionsv1beta1.Ingress)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range ingress.ObjectMeta.Finalizers {
-if string(ingress.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, ingress.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, ingress.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -336,7 +337,7 @@ func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalize
 ingress.ObjectMeta.Finalizers = newFinalizers
 ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from ingress %s: %v", finalizer, ingress.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from ingress %s: %v", finalizers, ingress.Name, err)
 }
 return ingress, nil
 }


@@ -169,7 +169,8 @@ func NewNamespaceController(client federationclientset.Interface, dynamicClientP
 },
 func(client kubeclientset.Interface, obj runtime.Object) error {
 namespace := obj.(*apiv1.Namespace)
-err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 // IsNotFound error is fine since that means the object is deleted already.
 if errors.IsNotFound(err) {
 return nil
@@ -210,14 +211,14 @@ func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer st
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a namespace.
-func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
 namespace := obj.(*apiv1.Namespace)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range namespace.ObjectMeta.Finalizers {
-if string(namespace.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, namespace.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, namespace.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -230,7 +231,7 @@ func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer
 namespace.ObjectMeta.Finalizers = newFinalizers
 namespace, err := nc.federatedApiClient.Core().Namespaces().Update(namespace)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from namespace %s: %v", finalizer, namespace.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from namespace %s: %v", finalizers, namespace.Name, err)
 }
 return namespace, nil
 }


@@ -209,7 +209,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
 },
 func(client kubeclientset.Interface, obj runtime.Object) error {
 rs := obj.(*extensionsv1.ReplicaSet)
-err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 return err
 })
@@ -242,14 +243,14 @@ func (frsc *ReplicaSetController) hasFinalizerFunc(obj runtime.Object, finalizer
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a replicaset.
-func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
 replicaset := obj.(*extensionsv1.ReplicaSet)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range replicaset.ObjectMeta.Finalizers {
-if string(replicaset.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, replicaset.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, replicaset.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -262,7 +263,7 @@ func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finali
 replicaset.ObjectMeta.Finalizers = newFinalizers
 replicaset, err := frsc.fedClient.Extensions().ReplicaSets(replicaset.Namespace).Update(replicaset)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from replicaset %s: %v", finalizer, replicaset.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from replicaset %s: %v", finalizers, replicaset.Name, err)
 }
 return replicaset, nil
 }


@@ -168,7 +168,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
 },
 func(client kubeclientset.Interface, obj pkgruntime.Object) error {
 secret := obj.(*apiv1.Secret)
-err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{})
+orphanDependents := false
+err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 return err
 })
@@ -201,14 +202,14 @@ func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object
 return false
 }
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a secret.
-func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
 secret := obj.(*apiv1.Secret)
 newFinalizers := []string{}
 hasFinalizer := false
 for i := range secret.ObjectMeta.Finalizers {
-if string(secret.ObjectMeta.Finalizers[i]) != finalizer {
+if !deletionhelper.ContainsString(finalizers, secret.ObjectMeta.Finalizers[i]) {
 newFinalizers = append(newFinalizers, secret.ObjectMeta.Finalizers[i])
 } else {
 hasFinalizer = true
@@ -221,7 +222,7 @@ func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Obj
 secret.ObjectMeta.Finalizers = newFinalizers
 secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
 if err != nil {
-return nil, fmt.Errorf("failed to remove finalizer %s from secret %s: %v", finalizer, secret.Name, err)
+return nil, fmt.Errorf("failed to remove finalizers %v from secret %s: %v", finalizers, secret.Name, err)
 }
 return secret, nil
 }

@ -291,7 +291,8 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
}, },
func(client kubeclientset.Interface, obj pkgruntime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
svc := obj.(*v1.Service) svc := obj.(*v1.Service)
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}) orphanDependents := false
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
return err return err
}) })
@ -328,14 +329,14 @@ func (s *ServiceController) hasFinalizerFunc(obj pkgruntime.Object, finalizer st
return false return false
} }
// Removes the finalizer from the given objects ObjectMeta. // Removes the finalizers from the given objects ObjectMeta.
// Assumes that the given object is a service. // Assumes that the given object is a service.
func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) { func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
service := obj.(*v1.Service) service := obj.(*v1.Service)
newFinalizers := []string{} newFinalizers := []string{}
hasFinalizer := false hasFinalizer := false
for i := range service.ObjectMeta.Finalizers { for i := range service.ObjectMeta.Finalizers {
if string(service.ObjectMeta.Finalizers[i]) != finalizer { if !deletionhelper.ContainsString(finalizers, service.ObjectMeta.Finalizers[i]) {
newFinalizers = append(newFinalizers, service.ObjectMeta.Finalizers[i]) newFinalizers = append(newFinalizers, service.ObjectMeta.Finalizers[i])
} else { } else {
hasFinalizer = true hasFinalizer = true
@ -348,7 +349,7 @@ func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer
service.ObjectMeta.Finalizers = newFinalizers service.ObjectMeta.Finalizers = newFinalizers
service, err := s.federationClient.Core().Services(service.Namespace).Update(service) service, err := s.federationClient.Core().Services(service.Namespace).Update(service)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to remove finalizer %s from service %s: %v", finalizer, service.Name, err) return nil, fmt.Errorf("failed to remove finalizers %v from service %s: %v", finalizers, service.Name, err)
} }
return service, nil return service, nil
} }

@ -9,7 +9,10 @@ load(
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = ["deletion_helper.go"], srcs = [
"deletion_helper.go",
"util.go",
],
tags = ["automanaged"], tags = ["automanaged"],
deps = [ deps = [
"//federation/pkg/federation-controller/util:go_default_library", "//federation/pkg/federation-controller/util:go_default_library",

@ -45,7 +45,7 @@ const (
) )
type HasFinalizerFunc func(runtime.Object, string) bool type HasFinalizerFunc func(runtime.Object, string) bool
type RemoveFinalizerFunc func(runtime.Object, string) (runtime.Object, error) type RemoveFinalizerFunc func(runtime.Object, []string) (runtime.Object, error)
type AddFinalizerFunc func(runtime.Object, []string) (runtime.Object, error) type AddFinalizerFunc func(runtime.Object, []string) (runtime.Object, error)
type ObjNameFunc func(runtime.Object) string type ObjNameFunc func(runtime.Object) string
@ -123,11 +123,8 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) (
// If the obj has FinalizerOrphan finalizer, then we need to orphan the // If the obj has FinalizerOrphan finalizer, then we need to orphan the
// corresponding objects in underlying clusters. // corresponding objects in underlying clusters.
// Just remove both the finalizers in that case. // Just remove both the finalizers in that case.
obj, err := dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) finalizers := []string{FinalizerDeleteFromUnderlyingClusters, metav1.FinalizerOrphanDependents}
if err != nil { return dh.removeFinalizerFunc(obj, finalizers)
return obj, err
}
return dh.removeFinalizerFunc(obj, metav1.FinalizerOrphanDependents)
} }
glog.V(2).Infof("Deleting obj %s from underlying clusters", objName) glog.V(2).Infof("Deleting obj %s from underlying clusters", objName)
@ -183,5 +180,5 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) (
} }
// All done. Just remove the finalizer. // All done. Just remove the finalizer.
return dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters) return dh.removeFinalizerFunc(obj, []string{FinalizerDeleteFromUnderlyingClusters})
} }

@ -0,0 +1,28 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deletionhelper
// ContainsString returns true if the given string slice contains the given string.
// Returns false otherwise.
func ContainsString(arr []string, s string) bool {
for i := range arr {
if arr[i] == s {
return true
}
}
return false
}
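
For context, a minimal sketch of how the federated controllers above use this helper when stripping finalizers; filterFinalizers is an illustrative name, not part of the upstream package:

// filterFinalizers drops every entry of toRemove from finalizers and reports
// whether anything was dropped, mirroring the loop in the controllers' removeFinalizerFunc.
func filterFinalizers(finalizers, toRemove []string) (kept []string, dropped bool) {
	for _, f := range finalizers {
		if ContainsString(toRemove, f) {
			dropped = true
			continue
		}
		kept = append(kept, f)
	}
	return kept, dropped
}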

@ -922,7 +922,7 @@ func marshallOverrides(overrideArgString string) (map[string]string, error) {
argsMap := make(map[string]string) argsMap := make(map[string]string)
overrideArgs := strings.Split(overrideArgString, ",") overrideArgs := strings.Split(overrideArgString, ",")
for _, overrideArg := range overrideArgs { for _, overrideArg := range overrideArgs {
splitArg := strings.Split(overrideArg, "=") splitArg := strings.SplitN(overrideArg, "=", 2)
if len(splitArg) != 2 { if len(splitArg) != 2 {
return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg) return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
} }
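
The switch from Split to SplitN above is what allows an override value to itself contain '='. A standalone sketch of the difference (not taken from the upstream tests):

package main

import (
	"fmt"
	"strings"
)

func main() {
	arg := "multiple-equalto-char=first-key=1"
	// Split yields three parts, which marshallOverrides rejects as malformed;
	// SplitN with a limit of 2 keeps everything after the first '=' as the value.
	fmt.Println(strings.Split(arg, "="))     // [multiple-equalto-char first-key 1]
	fmt.Println(strings.SplitN(arg, "=", 2)) // [multiple-equalto-char first-key=1]
}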

@ -322,8 +322,10 @@ func TestMarshallAndMergeOverrides(t *testing.T) {
expectedErr: "wrong format for override arg: wrong-format-arg", expectedErr: "wrong format for override arg: wrong-format-arg",
}, },
{ {
overrideParams: "wrong-format-arg=override=wrong-format-arg=override", // TODO: Multiple arg values separated by , are not supported yet
expectedErr: "wrong format for override arg: wrong-format-arg=override=wrong-format-arg=override", overrideParams: "multiple-equalto-char=first-key=1",
expectedSet: sets.NewString("arg2=val2", "arg1=val1", "multiple-equalto-char=first-key=1"),
expectedErr: "",
}, },
{ {
overrideParams: "=wrong-format-only-value", overrideParams: "=wrong-format-only-value",

@ -194,7 +194,8 @@ func deleteConfigMapFromCluster(hostClientset internalclientset.Interface, secre
// deleteSecret deletes the secret with the given name from the host // deleteSecret deletes the secret with the given name from the host
// cluster. // cluster.
func deleteSecret(clientset internalclientset.Interface, name, namespace string) error { func deleteSecret(clientset internalclientset.Interface, name, namespace string) error {
return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{}) orphanDependents := false
return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
} }
// isNotFound checks if the given error is a NotFound status error. // isNotFound checks if the given error is a NotFound status error.

@ -36,10 +36,14 @@ function run_kube_apiserver() {
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota" ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota"
# Include RBAC (to exercise bootstrapping), and AlwaysAllow to allow all actions
AUTHORIZATION_MODE="RBAC,AlwaysAllow"
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \ "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
--address="127.0.0.1" \ --address="127.0.0.1" \
--public-address-override="127.0.0.1" \ --public-address-override="127.0.0.1" \
--port="${API_PORT}" \ --port="${API_PORT}" \
--authorization-mode="${AUTHORIZATION_MODE}" \
--admission-control="${ADMISSION_CONTROL}" \ --admission-control="${ADMISSION_CONTROL}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \ --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--public-address-override="127.0.0.1" \ --public-address-override="127.0.0.1" \

@ -276,10 +276,10 @@ const (
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
) )
// Tries to add a toleration to annotations list. Returns true if something was updated // AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
// false otherwise. // Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { func AddOrUpdateTolerationInPodSpec(spec *PodSpec, toleration *Toleration) (bool, error) {
podTolerations := pod.Spec.Tolerations podTolerations := spec.Tolerations
var newTolerations []Toleration var newTolerations []Toleration
updated := false updated := false
@ -300,10 +300,16 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error)
newTolerations = append(newTolerations, *toleration) newTolerations = append(newTolerations, *toleration)
} }
pod.Spec.Tolerations = newTolerations spec.Tolerations = newTolerations
return true, nil return true, nil
} }
// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
}
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. // if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
// TODO: uniqueness check for tolerations in api validations. // TODO: uniqueness check for tolerations in api validations.

@ -233,6 +233,7 @@ type CronJobSpec struct {
StartingDeadlineSeconds *int64 StartingDeadlineSeconds *int64
// ConcurrencyPolicy specifies how to treat concurrent executions of a Job. // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
// Defaults to Allow.
// +optional // +optional
ConcurrencyPolicy ConcurrencyPolicy ConcurrencyPolicy ConcurrencyPolicy

@ -41,7 +41,7 @@ func TestSetDefaultCronJob(t *testing.T) {
}, },
}, },
}, },
"nothing should be defaulted": { "set fields should not be defaulted": {
original: &CronJob{ original: &CronJob{
Spec: CronJobSpec{ Spec: CronJobSpec{
ConcurrencyPolicy: ForbidConcurrent, ConcurrencyPolicy: ForbidConcurrent,

@ -72,6 +72,7 @@ message CronJobSpec {
optional int64 startingDeadlineSeconds = 2; optional int64 startingDeadlineSeconds = 2;
// ConcurrencyPolicy specifies how to treat concurrent executions of a Job. // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
// Defaults to Allow.
// +optional // +optional
optional string concurrencyPolicy = 3; optional string concurrencyPolicy = 3;

@ -94,6 +94,7 @@ type CronJobSpec struct {
StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
// ConcurrencyPolicy specifies how to treat concurrent executions of a Job. // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
// Defaults to Allow.
// +optional // +optional
ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`

@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{
"": "CronJobSpec describes how the job execution will look like and when it will actually run.", "": "CronJobSpec describes how the job execution will look like and when it will actually run.",
"schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
"startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
"concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
"suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
"jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.",
"successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",

@ -20,8 +20,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// IsDefaultStorageClassAnnotation represents a StorageClass annotation that // IsDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass // marks a class as the default StorageClass
//TODO: Update IsDefaultStorageClassannotation and remove Beta when no longer used //TODO: remove Beta when no longer used
const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
// IsDefaultAnnotationText returns a pretty Yes/No String if // IsDefaultAnnotationText returns a pretty Yes/No String if

@ -392,6 +392,10 @@ type CloudConfig struct {
// on a different aws account, on a different cloud provider or on-premise. // on a different aws account, on a different cloud provider or on-premise.
// If the flag is set also the KubernetesClusterTag must be provided // If the flag is set also the KubernetesClusterTag must be provided
VPC string VPC string
// SubnetID enables using a specific subnet for ELBs
SubnetID string
// RouteTableID enables using a specific RouteTable
RouteTableID string
// KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources // KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
KubernetesClusterTag string KubernetesClusterTag string
@ -817,13 +821,14 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
deviceAllocators: make(map[types.NodeName]DeviceAllocator), deviceAllocators: make(map[types.NodeName]DeviceAllocator),
} }
if cfg.Global.VPC != "" && cfg.Global.KubernetesClusterTag != "" { if cfg.Global.VPC != "" && cfg.Global.SubnetID != "" && (cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "") {
// When the master is running on a different AWS account, cloud provider or on-premise // When the master is running on a different AWS account, cloud provider or on-premise
// build up a dummy instance and use the VPC from the nodes account // build up a dummy instance and use the VPC from the nodes account
glog.Info("Master is configured to run on a AWS account, different cloud provider or on-premise") glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premise")
awsCloud.selfAWSInstance = &awsInstance{ awsCloud.selfAWSInstance = &awsInstance{
nodeName: "master-dummy", nodeName: "master-dummy",
vpcID: cfg.Global.VPC, vpcID: cfg.Global.VPC,
subnetID: cfg.Global.SubnetID,
} }
awsCloud.vpcID = cfg.Global.VPC awsCloud.vpcID = cfg.Global.VPC
} else { } else {

@ -29,17 +29,27 @@ func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
// This should be unnecessary (we already filter on TagNameKubernetesCluster, // This should be unnecessary (we already filter on TagNameKubernetesCluster,
// and something is broken if cluster name doesn't match, but anyway... // and something is broken if cluster name doesn't match, but anyway...
// TODO: All clouds should be cluster-aware by default // TODO: All clouds should be cluster-aware by default
request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)}
response, err := c.ec2.DescribeRouteTables(request)
if err != nil {
return nil, err
}
var tables []*ec2.RouteTable var tables []*ec2.RouteTable
for _, table := range response {
if c.tagging.hasClusterTag(table.Tags) { if c.cfg.Global.RouteTableID != "" {
tables = append(tables, table) request := &ec2.DescribeRouteTablesInput{Filters: []*ec2.Filter{newEc2Filter("route-table-id", c.cfg.Global.RouteTableID)}}
response, err := c.ec2.DescribeRouteTables(request)
if err != nil {
return nil, err
}
tables = response
} else {
request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)}
response, err := c.ec2.DescribeRouteTables(request)
if err != nil {
return nil, err
}
for _, table := range response {
if c.tagging.hasClusterTag(table.Tags) {
tables = append(tables, table)
}
} }
} }

@ -28,6 +28,7 @@ go_library(
"//pkg/api/v1:go_default_library", "//pkg/api/v1:go_default_library",
"//pkg/api/v1/service:go_default_library", "//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library", "//pkg/volume:go_default_library",
"//vendor:github.com/Azure/azure-sdk-for-go/arm/compute", "//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
"//vendor:github.com/Azure/azure-sdk-for-go/arm/network", "//vendor:github.com/Azure/azure-sdk-for-go/arm/network",

@ -17,14 +17,17 @@ limitations under the License.
package azure package azure
import ( import (
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/version"
"github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"time" "time"
@ -125,38 +128,54 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID) az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID)
az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.SubnetsClient.Authorizer = servicePrincipalToken az.SubnetsClient.Authorizer = servicePrincipalToken
az.SubnetsClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.SubnetsClient.Client)
az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.RouteTablesClient.Authorizer = servicePrincipalToken az.RouteTablesClient.Authorizer = servicePrincipalToken
az.RouteTablesClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.RouteTablesClient.Client)
az.RoutesClient = network.NewRoutesClient(az.SubscriptionID) az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.RoutesClient.Authorizer = servicePrincipalToken az.RoutesClient.Authorizer = servicePrincipalToken
az.RoutesClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.RoutesClient.Client)
az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID) az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID)
az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.InterfacesClient.Authorizer = servicePrincipalToken az.InterfacesClient.Authorizer = servicePrincipalToken
az.InterfacesClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.InterfacesClient.Client)
az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID) az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID)
az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.LoadBalancerClient.Authorizer = servicePrincipalToken az.LoadBalancerClient.Authorizer = servicePrincipalToken
az.LoadBalancerClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.LoadBalancerClient.Client)
az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID) az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID)
az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.VirtualMachinesClient.Authorizer = servicePrincipalToken az.VirtualMachinesClient.Authorizer = servicePrincipalToken
az.VirtualMachinesClient.PollingDelay = 5 * time.Second az.VirtualMachinesClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.VirtualMachinesClient.Client)
az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID) az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID)
az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.PublicIPAddressesClient.Authorizer = servicePrincipalToken az.PublicIPAddressesClient.Authorizer = servicePrincipalToken
az.PublicIPAddressesClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.PublicIPAddressesClient.Client)
az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID) az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID)
az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.SecurityGroupsClient.Authorizer = servicePrincipalToken az.SecurityGroupsClient.Authorizer = servicePrincipalToken
az.SecurityGroupsClient.PollingDelay = 5 * time.Second
configureUserAgent(&az.SecurityGroupsClient.Client)
az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
az.StorageAccountClient.Authorizer = servicePrincipalToken az.StorageAccountClient.Authorizer = servicePrincipalToken
return &az, nil return &az, nil
} }
@ -194,3 +213,8 @@ func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []stri
func (az *Cloud) ProviderName() string { func (az *Cloud) ProviderName() string {
return CloudProviderName return CloudProviderName
} }
func configureUserAgent(client *autorest.Client) {
k8sVersion := version.Get().GitVersion
client.UserAgent = fmt.Sprintf("%s; %s", client.UserAgent, k8sVersion)
}

@ -24,9 +24,11 @@ import (
"net/url" "net/url"
"path" "path"
"path/filepath" "path/filepath"
"regexp"
"runtime" "runtime"
"strings" "strings"
"sync" "sync"
"time"
"gopkg.in/gcfg.v1" "gopkg.in/gcfg.v1"
@ -49,23 +51,33 @@ import (
) )
const ( const (
ProviderName = "vsphere" ProviderName = "vsphere"
ActivePowerState = "poweredOn" ActivePowerState = "poweredOn"
SCSIControllerType = "scsi" SCSIControllerType = "scsi"
LSILogicControllerType = "lsiLogic" LSILogicControllerType = "lsiLogic"
BusLogicControllerType = "busLogic" BusLogicControllerType = "busLogic"
PVSCSIControllerType = "pvscsi" PVSCSIControllerType = "pvscsi"
LSILogicSASControllerType = "lsiLogic-sas" LSILogicSASControllerType = "lsiLogic-sas"
SCSIControllerLimit = 4 SCSIControllerLimit = 4
SCSIControllerDeviceLimit = 15 SCSIControllerDeviceLimit = 15
SCSIDeviceSlots = 16 SCSIDeviceSlots = 16
SCSIReservedSlot = 7 SCSIReservedSlot = 7
ThinDiskType = "thin" ThinDiskType = "thin"
PreallocatedDiskType = "preallocated" PreallocatedDiskType = "preallocated"
EagerZeroedThickDiskType = "eagerZeroedThick" EagerZeroedThickDiskType = "eagerZeroedThick"
ZeroedThickDiskType = "zeroedThick" ZeroedThickDiskType = "zeroedThick"
VolDir = "kubevols" VolDir = "kubevols"
RoundTripperDefaultCount = 3 RoundTripperDefaultCount = 3
DummyVMPrefixName = "vsphere-k8s"
VSANDatastoreType = "vsan"
MAC_OUI_VC = "00:50:56"
MAC_OUI_ESX = "00:0c:29"
DiskNotFoundErrMsg = "No vSphere disk ID found"
NoDiskUUIDFoundErrMsg = "No disk UUID found"
NoDevicesFoundErrMsg = "No devices found"
NonSupportedControllerTypeErrMsg = "Disk is attached to non-supported controller type"
FileAlreadyExistErrMsg = "File requested already exist"
CleanUpDummyVMRoutine_Interval = 5
) )
// Controller types that are currently supported for hot attach of disks // Controller types that are currently supported for hot attach of disks
@ -85,14 +97,17 @@ var diskFormatValidType = map[string]string{
} }
var DiskformatValidOptions = generateDiskFormatValidOptions() var DiskformatValidOptions = generateDiskFormatValidOptions()
var cleanUpRoutineInitialized = false
var ErrNoDiskUUIDFound = errors.New("No disk UUID found") var ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg)
var ErrNoDiskIDFound = errors.New("No vSphere disk ID found") var ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg)
var ErrNoDevicesFound = errors.New("No devices found") var ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg)
var ErrNonSupportedControllerType = errors.New("Disk is attached to non-supported controller type") var ErrNonSupportedControllerType = errors.New(NonSupportedControllerTypeErrMsg)
var ErrFileAlreadyExist = errors.New("File requested already exist") var ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg)
var clientLock sync.Mutex var clientLock sync.Mutex
var cleanUpRoutineInitLock sync.Mutex
var cleanUpDummyVMLock sync.RWMutex
// VSphere is an implementation of cloud provider Interface for VSphere. // VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct { type VSphere struct {
@ -166,11 +181,12 @@ type Volumes interface {
// VolumeOptions specifies capacity, tags, name and diskFormat for a volume. // VolumeOptions specifies capacity, tags, name and diskFormat for a volume.
type VolumeOptions struct { type VolumeOptions struct {
CapacityKB int CapacityKB int
Tags map[string]string Tags map[string]string
Name string Name string
DiskFormat string DiskFormat string
Datastore string Datastore string
StorageProfileData string
} }
// Generates Valid Options for Diskformat // Generates Valid Options for Diskformat
@ -687,6 +703,8 @@ func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualD
// Attaches given virtual disk volume to the compute running kubelet. // Attaches given virtual disk volume to the compute running kubelet.
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
var newSCSIController types.BaseVirtualDevice
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -722,50 +740,24 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
var diskControllerType = vs.cfg.Disk.SCSIControllerType var diskControllerType = vs.cfg.Disk.SCSIControllerType
// find SCSI controller of particular type from VM devices // find SCSI controller of particular type from VM devices
allSCSIControllers := getSCSIControllers(vmDevices)
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType)
scsiController := getAvailableSCSIController(scsiControllersOfRequiredType) scsiController := getAvailableSCSIController(scsiControllersOfRequiredType)
newSCSICreated := false
var newSCSICreated = false
var newSCSIController types.BaseVirtualDevice
// creating a scsi controller as there is none found of controller type defined
if scsiController == nil { if scsiController == nil {
if len(allSCSIControllers) >= SCSIControllerLimit { newSCSIController, err = createAndAttachSCSIControllerToVM(ctx, vm, diskControllerType)
// we reached the maximum number of controllers we can attach
return "", "", fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
}
glog.V(1).Infof("Creating a SCSI controller of %v type", diskControllerType)
newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
if err != nil { if err != nil {
k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err)) glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.Name(), err)
return "", "", err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
hotAndRemove := true
configNewSCSIController.HotAddRemove = &hotAndRemove
configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
glog.V(1).Infof("cannot add SCSI controller to vm - %v", err)
// attempt clean up of scsi controller
if vmDevices, err := vm.Device(ctx); err == nil {
cleanUpController(ctx, newSCSIController, vmDevices, vm)
}
return "", "", err return "", "", err
} }
// verify scsi controller in virtual machine // verify scsi controller in virtual machine
vmDevices, err = vm.Device(ctx) vmDevices, err := vm.Device(ctx)
if err != nil { if err != nil {
// cannot cleanup if there is no device list
return "", "", err return "", "", err
} }
// Get VM device list // Get VM device list
_, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) _, vmDevices, _, err = getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
if err != nil { if err != nil {
glog.Errorf("cannot get vmDevices for VM err=%s", err) glog.Errorf("cannot get vmDevices for VM err=%s", err)
return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err) return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err)
@ -798,7 +790,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
glog.Errorf("Failed while searching for datastore %+q. err %s", datastorePathObj.Datastore, err) glog.Errorf("Failed while searching for datastore %+q. err %s", datastorePathObj.Datastore, err)
return "", "", err return "", "", err
} }
vmDiskPath = removeClusterFromVDiskPath(vmDiskPath)
disk := vmDevices.CreateDisk(scsiController, ds.Reference(), vmDiskPath) disk := vmDevices.CreateDisk(scsiController, ds.Reference(), vmDiskPath)
unitNumber, err := getNextUnitNumber(vmDevices, scsiController) unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
if err != nil { if err != nil {
@ -1045,6 +1037,7 @@ func checkDiskAttached(volPath string, vmdevices object.VirtualDeviceList, dc *o
// Returns the object key that denotes the controller object to which vmdk is attached. // Returns the object key that denotes the controller object to which vmdk is attached.
func getVirtualDiskControllerKey(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (int32, error) { func getVirtualDiskControllerKey(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (int32, error) {
volPath = removeClusterFromVDiskPath(volPath)
volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client) volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client)
if err != nil { if err != nil {
@ -1175,7 +1168,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
if err != nil { if err != nil {
return err return err
} }
volPath = removeClusterFromVDiskPath(volPath)
diskID, err := getVirtualDiskID(volPath, vmDevices, dc, vs.client) diskID, err := getVirtualDiskID(volPath, vmDevices, dc, vs.client)
if err != nil { if err != nil {
glog.Warningf("disk ID not found for %v ", volPath) glog.Warningf("disk ID not found for %v ", volPath)
@ -1200,8 +1193,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
// CreateVolume creates a volume of given size (in KiB). // CreateVolume creates a volume of given size (in KiB).
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) { func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) {
var diskFormat string
var datastore string var datastore string
var destVolPath string
// Default datastore is the datastore in the vSphere config file that is used initialize vSphere cloud provider. // Default datastore is the datastore in the vSphere config file that is used initialize vSphere cloud provider.
if volumeOptions.Datastore == "" { if volumeOptions.Datastore == "" {
@ -1220,8 +1213,6 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
" Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions) " Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions)
} }
diskFormat = diskFormatValidType[volumeOptions.DiskFormat]
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -1246,43 +1237,105 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
return "", err return "", err
} }
// vmdks will be created inside kubevols directory // Create a disk with the VSAN storage capabilities specified in the volumeOptions.StorageProfileData.
kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" // This is achieved by following steps:
err = makeDirectoryInDatastore(vs.client, dc, kubeVolsPath, false) // 1. Create dummy VM if not already present.
if err != nil && err != ErrFileAlreadyExist { // 2. Add a new disk to the VM by performing VM reconfigure.
glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) // 3. Detach the new disk from the dummy VM.
return "", err // 4. Delete the dummy VM.
} if volumeOptions.StorageProfileData != "" {
glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) // Check if the datastore is VSAN if any capability requirements are specified.
// VSphere cloud provider now only supports VSAN capabilities requirements
ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds)
if err != nil {
return "", fmt.Errorf("Failed while determining whether the datastore: %q"+
" is VSAN or not.", datastore)
}
if !ok {
return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
" The policy parameters will work only with VSAN Datastore."+
" So, please specify a valid VSAN datastore in Storage class definition.", datastore)
}
vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" // Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
cleanUpDummyVMLock.RLock()
defer cleanUpDummyVMLock.RUnlock()
// Create a virtual disk manager // Create a new background routine that will delete any dummy VM's that are left stale.
virtualDiskManager := object.NewVirtualDiskManager(vs.client.Client) // This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
cleanUpRoutineInitLock.Lock()
if !cleanUpRoutineInitialized {
go vs.cleanUpDummyVMs(DummyVMPrefixName)
cleanUpRoutineInitialized = true
}
cleanUpRoutineInitLock.Unlock()
// Create specification for new virtual disk // Check if the VM exists in kubernetes cluster folder.
vmDiskSpec := &types.FileBackedVirtualDiskSpec{ // The kubernetes cluster folder - vs.cfg.Global.WorkingDir is where all the nodes in the kubernetes cluster are created.
VirtualDiskSpec: types.VirtualDiskSpec{ dummyVMFullName := DummyVMPrefixName + "-" + volumeOptions.Name
AdapterType: LSILogicControllerType, vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName
DiskType: diskFormat, dummyVM, err := f.VirtualMachine(ctx, vmRegex)
}, if err != nil {
CapacityKb: int64(volumeOptions.CapacityKB), // 1. Create a dummy VM and return the VM reference.
dummyVM, err = vs.createDummyVM(ctx, dc, ds, dummyVMFullName)
if err != nil {
return "", err
}
}
// 2. Reconfigure the VM to attach the disk with the VSAN policy configured.
vmDiskPath, err := vs.createVirtualDiskWithPolicy(ctx, dc, ds, dummyVM, volumeOptions)
fileAlreadyExist := false
if err != nil {
vmDiskPath = filepath.Clean(ds.Path(VolDir)) + "/" + volumeOptions.Name + ".vmdk"
errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", vmDiskPath)
if errorMessage == err.Error() {
//Skip error and continue to detach the disk as the disk was already created on the datastore.
fileAlreadyExist = true
glog.V(1).Infof("File: %v already exists", vmDiskPath)
} else {
glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
return "", err
}
}
dummyVMNodeName := vmNameToNodeName(dummyVMFullName)
// 3. Detach the disk from the dummy VM.
err = vs.DetachDisk(vmDiskPath, dummyVMNodeName)
if err != nil {
if DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
// Skip error if disk was already detached from the dummy VM but still present on the datastore.
glog.V(1).Infof("File: %v is already detached", vmDiskPath)
} else {
glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmDiskPath, dummyVMFullName, err)
return "", fmt.Errorf("Failed to create the volume: %q with err: %+v", volumeOptions.Name, err)
}
}
// 4. Delete the dummy VM
err = deleteVM(ctx, dummyVM)
if err != nil {
return "", fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
}
destVolPath = vmDiskPath
} else {
// Create a virtual disk directly if no VSAN storage capabilities are specified by the user.
destVolPath, err = createVirtualDisk(ctx, vs.client, dc, ds, volumeOptions)
if err != nil {
return "", fmt.Errorf("Failed to create the virtual disk having name: %+q with err: %+v", destVolPath, err)
}
} }
// Create virtual disk if filepath.Base(datastore) != datastore {
task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec) // If Datastore is within cluster, add cluster path to the destVolPath
if err != nil { destVolPath = strings.Replace(destVolPath, filepath.Base(datastore), datastore, 1)
return "", err
} }
err = task.Wait(ctx) glog.V(1).Infof("VM Disk path is %+q", destVolPath)
if err != nil { return destVolPath, nil
return "", err
}
return vmDiskPath, nil
} }
// DeleteVolume deletes a volume given volume name. // DeleteVolume deletes a volume given volume name.
// Also, deletes the folder where the volume resides.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error { func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
// Create context // Create context
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -1308,7 +1361,24 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
if filepath.Ext(vmDiskPath) != ".vmdk" { if filepath.Ext(vmDiskPath) != ".vmdk" {
vmDiskPath += ".vmdk" vmDiskPath += ".vmdk"
} }
// Get the vmDisk Name
diskNameWithExt := path.Base(vmDiskPath)
diskName := strings.TrimSuffix(diskNameWithExt, filepath.Ext(diskNameWithExt))
// Search for the dummyVM if present and delete it.
dummyVMFullName := DummyVMPrefixName + "-" + diskName
vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName
dummyVM, err := f.VirtualMachine(ctx, vmRegex)
if err == nil {
err = deleteVM(ctx, dummyVM)
if err != nil {
return fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
}
}
// Delete virtual disk // Delete virtual disk
vmDiskPath = removeClusterFromVDiskPath(vmDiskPath)
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc) task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
if err != nil { if err != nil {
return err return err
@ -1356,6 +1426,341 @@ func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bo
return false, nil return false, nil
} }
// A background routine responsible for deleting stale dummy VMs.
func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
time.Sleep(CleanUpDummyVMRoutine_Interval * time.Minute)
// Ensure client is logged in and session is valid
err := vSphereLogin(ctx, vs)
if err != nil {
glog.V(4).Infof("[cleanUpDummyVMs] Unable to login to vSphere with err: %+v", err)
continue
}
// Create a new finder
f := find.NewFinder(vs.client.Client, true)
// Fetch and set data center
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
if err != nil {
glog.V(4).Infof("[cleanUpDummyVMs] Unable to fetch the datacenter: %q with err: %+v", vs.cfg.Global.Datacenter, err)
continue
}
f.SetDatacenter(dc)
// Get the folder reference for global working directory where the dummy VM needs to be created.
vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
if err != nil {
glog.V(4).Infof("[cleanUpDummyVMs] Unable to get the kubernetes folder: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
continue
}
// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
cleanUpDummyVMLock.Lock()
dummyVMRefList, err := getDummyVMList(ctx, vs.client, vmFolder, dummyVMPrefix)
if err != nil {
glog.V(4).Infof("[cleanUpDummyVMs] Unable to get dummy VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
cleanUpDummyVMLock.Unlock()
continue
}
for _, dummyVMRef := range dummyVMRefList {
err = deleteVM(ctx, dummyVMRef)
if err != nil {
glog.V(4).Infof("[cleanUpDummyVMs] Unable to delete dummy VM: %q with err: %+v", dummyVMRef.Name(), err)
continue
}
}
cleanUpDummyVMLock.Unlock()
}
}
// Get the dummy VM list from the kubernetes working directory.
func getDummyVMList(ctx context.Context, c *govmomi.Client, vmFolder *object.Folder, dummyVMPrefix string) ([]*object.VirtualMachine, error) {
vmFolders, err := vmFolder.Children(ctx)
if err != nil {
glog.V(4).Infof("Unable to retrieve the virtual machines from the kubernetes cluster: %+v", vmFolder)
return nil, err
}
var dummyVMRefList []*object.VirtualMachine
pc := property.DefaultCollector(c.Client)
for _, vmFolder := range vmFolders {
if vmFolder.Reference().Type == "VirtualMachine" {
var vmRefs []types.ManagedObjectReference
var vmMorefs []mo.VirtualMachine
vmRefs = append(vmRefs, vmFolder.Reference())
err = pc.Retrieve(ctx, vmRefs, []string{"name"}, &vmMorefs)
if err != nil {
return nil, err
}
if strings.HasPrefix(vmMorefs[0].Name, dummyVMPrefix) {
dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(c.Client, vmRefs[0]))
}
}
}
return dummyVMRefList, nil
}
func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, vmName string) (*object.VirtualMachine, error) {
// Create a virtual machine config spec with 1 SCSI adapter.
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
Name: vmName,
Files: &types.VirtualMachineFileInfo{
VmPathName: "[" + datastore.Name() + "]",
},
NumCPUs: 1,
MemoryMB: 4,
DeviceChange: []types.BaseVirtualDeviceConfigSpec{
&types.VirtualDeviceConfigSpec{
Operation: types.VirtualDeviceConfigSpecOperationAdd,
Device: &types.ParaVirtualSCSIController{
VirtualSCSIController: types.VirtualSCSIController{
SharedBus: types.VirtualSCSISharingNoSharing,
VirtualController: types.VirtualController{
BusNumber: 0,
VirtualDevice: types.VirtualDevice{
Key: 1000,
},
},
},
},
},
},
}
// Get the resource pool for current node. This is where dummy VM will be created.
resourcePool, err := vs.getCurrentNodeResourcePool(ctx, datacenter)
if err != nil {
return nil, err
}
// Get the folder reference for global working directory where the dummy VM needs to be created.
vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
if err != nil {
return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err)
}
task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil)
if err != nil {
return nil, err
}
dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
return nil, err
}
vmRef := dummyVMTaskInfo.Result.(object.Reference)
dummyVM := object.NewVirtualMachine(vs.client.Client, vmRef.Reference())
return dummyVM, nil
}
func (vs *VSphere) getCurrentNodeResourcePool(ctx context.Context, datacenter *object.Datacenter) (*object.ResourcePool, error) {
// Create a new finder
f := find.NewFinder(vs.client.Client, true)
f.SetDatacenter(datacenter)
vmRegex := vs.cfg.Global.WorkingDir + vs.localInstanceID
currentVM, err := f.VirtualMachine(ctx, vmRegex)
if err != nil {
return nil, err
}
currentVMHost, err := currentVM.HostSystem(ctx)
if err != nil {
return nil, err
}
// Get the resource pool for the current node.
// We create the dummy VM in the same resource pool as current node.
resourcePool, err := currentVMHost.ResourcePool(ctx)
if err != nil {
return nil, err
}
return resourcePool, nil
}
// Creates a virtual disk with the policy configured to the disk.
// A call to this function is made only when a user specifies VSAN storage capabilities in the storage class definition.
func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, virtualMachine *object.VirtualMachine, volumeOptions *VolumeOptions) (string, error) {
var diskFormat string
diskFormat = diskFormatValidType[volumeOptions.DiskFormat]
vmDevices, err := virtualMachine.Device(ctx)
if err != nil {
return "", err
}
var diskControllerType = vs.cfg.Disk.SCSIControllerType
// find SCSI controller of particular type from VM devices
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType)
scsiController := scsiControllersOfRequiredType[0]
kubeVolsPath := filepath.Clean(datastore.Path(VolDir)) + "/"
// Create a kubevols directory in the datastore if one doesn't exist.
err = makeDirectoryInDatastore(vs.client, datacenter, kubeVolsPath, false)
if err != nil && err != ErrFileAlreadyExist {
glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
return "", err
}
glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
disk := vmDevices.CreateDisk(scsiController, datastore.Reference(), vmDiskPath)
unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
if err != nil {
glog.Errorf("cannot attach disk to VM, limit reached - %v.", err)
return "", err
}
*disk.UnitNumber = unitNumber
disk.CapacityInKB = int64(volumeOptions.CapacityKB)
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
switch diskFormat {
case ThinDiskType:
backing.ThinProvisioned = types.NewBool(true)
case EagerZeroedThickDiskType:
backing.EagerlyScrub = types.NewBool(true)
default:
backing.ThinProvisioned = types.NewBool(false)
}
// Reconfigure VM
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
Device: disk,
Operation: types.VirtualDeviceConfigSpecOperationAdd,
FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
}
storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{
ProfileId: "",
ProfileData: &types.VirtualMachineProfileRawData{
ExtensionKey: "com.vmware.vim.sps",
ObjectData: volumeOptions.StorageProfileData,
},
}
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
task, err := virtualMachine.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err)
return "", err
}
err = task.Wait(ctx)
if err != nil {
glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err)
return "", err
}
return vmDiskPath, nil
}
// Creates a SCSI controller of the given type and attaches it to the VM; called when no suitable controller is found.
func createAndAttachSCSIControllerToVM(ctx context.Context, vm *object.VirtualMachine, diskControllerType string) (types.BaseVirtualDevice, error) {
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
return nil, err
}
allSCSIControllers := getSCSIControllers(vmDevices)
if len(allSCSIControllers) >= SCSIControllerLimit {
// we reached the maximum number of controllers we can attach
return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
}
newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
if err != nil {
k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err))
return nil, err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
hotAndRemove := true
configNewSCSIController.HotAddRemove = &hotAndRemove
configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
glog.V(1).Infof("cannot add SCSI controller to vm - %v", err)
// attempt clean up of scsi controller
if vmDevices, err := vm.Device(ctx); err == nil {
cleanUpController(ctx, newSCSIController, vmDevices, vm)
}
return nil, err
}
return newSCSIController, nil
}
// Create a virtual disk.
func createVirtualDisk(ctx context.Context, c *govmomi.Client, dc *object.Datacenter, ds *object.Datastore, volumeOptions *VolumeOptions) (string, error) {
kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
// Create a kubevols directory in the datastore if one doesn't exist.
err := makeDirectoryInDatastore(c, dc, kubeVolsPath, false)
if err != nil && err != ErrFileAlreadyExist {
glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
return "", err
}
glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
diskFormat := diskFormatValidType[volumeOptions.DiskFormat]
// Create a virtual disk manager
virtualDiskManager := object.NewVirtualDiskManager(c.Client)
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: LSILogicControllerType,
DiskType: diskFormat,
},
CapacityKb: int64(volumeOptions.CapacityKB),
}
// Create virtual disk
task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec)
if err != nil {
return "", err
}
return vmDiskPath, task.Wait(ctx)
}
// Check if the provided datastore is VSAN
func checkIfDatastoreTypeIsVSAN(c *govmomi.Client, datastore *object.Datastore) (bool, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pc := property.DefaultCollector(c.Client)
// Convert datastores into list of references
var dsRefs []types.ManagedObjectReference
dsRefs = append(dsRefs, datastore.Reference())
// Retrieve summary property for the given datastore
var dsMorefs []mo.Datastore
err := pc.Retrieve(ctx, dsRefs, []string{"summary"}, &dsMorefs)
if err != nil {
return false, err
}
for _, ds := range dsMorefs {
if ds.Summary.Type == VSANDatastoreType {
return true, nil
}
}
return false, nil
}
// Creates a folder using the specified name. // Creates a folder using the specified name.
// If the intermediate level folders do not exist, // If the intermediate level folders do not exist,
// and the parameter createParents is true, // and the parameter createParents is true,
@ -1378,3 +1783,70 @@ func makeDirectoryInDatastore(c *govmomi.Client, dc *object.Datacenter, path str
return err return err
} }
// Get the folder object with the given name under the datacenter's VM folder.
func getFolder(ctx context.Context, c *govmomi.Client, datacenterName string, folderName string) (*object.Folder, error) {
f := find.NewFinder(c.Client, true)
// Fetch and set data center
dc, err := f.Datacenter(ctx, datacenterName)
if err != nil {
return nil, err
}
f.SetDatacenter(dc)
folderName = strings.TrimSuffix(folderName, "/")
dcFolders, err := dc.Folders(ctx)
vmFolders, _ := dcFolders.VmFolder.Children(ctx)
var vmFolderRefs []types.ManagedObjectReference
for _, vmFolder := range vmFolders {
vmFolderRefs = append(vmFolderRefs, vmFolder.Reference())
}
// Get only references of type folder.
var folderRefs []types.ManagedObjectReference
for _, vmFolder := range vmFolderRefs {
if vmFolder.Type == "Folder" {
folderRefs = append(folderRefs, vmFolder)
}
}
// Find the specific folder reference matching the folder name.
var resultFolder *object.Folder
pc := property.DefaultCollector(c.Client)
for _, folderRef := range folderRefs {
var refs []types.ManagedObjectReference
var folderMorefs []mo.Folder
refs = append(refs, folderRef)
err = pc.Retrieve(ctx, refs, []string{"name"}, &folderMorefs)
for _, fref := range folderMorefs {
if fref.Name == folderName {
resultFolder = object.NewFolder(c.Client, folderRef)
}
}
}
return resultFolder, nil
}
// Delete the VM.
func deleteVM(ctx context.Context, vm *object.VirtualMachine) error {
destroyTask, err := vm.Destroy(ctx)
if err != nil {
return err
}
return destroyTask.Wait(ctx)
}
// Remove the cluster or folder path from the vDiskPath
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
func removeClusterFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
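Note: the bracketed-prefix handling above can be exercised on its own. Below is a minimal standalone sketch (a hypothetical stripCluster helper, not part of the vendored code) that reproduces the two documented cases:

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

// stripCluster keeps only the datastore name inside the square brackets,
// dropping any datastore-cluster (or folder) prefix, as in the code above.
func stripCluster(vDiskPath string) string {
	m := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(vDiskPath)
	if len(m) < 2 {
		return vDiskPath // no "[datastore]" prefix, leave the path untouched
	}
	datastore := m[1]
	if filepath.Base(datastore) != datastore {
		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
	}
	return vDiskPath
}

func main() {
	// prints "[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"
	fmt.Println(stripCluster("[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// already plain, printed unchanged
	fmt.Println(stripCluster("[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
}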

View file

@ -27,10 +27,30 @@ import (
) )
// GetPodTemplateWithHash returns copy of provided template with additional // GetPodTemplateWithHash returns copy of provided template with additional
// label which contains hash of provided template // label which contains hash of provided template and sets default daemon tolerations.
func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec { func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec {
obj, _ := api.Scheme.DeepCopy(template) obj, _ := api.Scheme.DeepCopy(template)
newTemplate := obj.(v1.PodTemplateSpec) newTemplate := obj.(v1.PodTemplateSpec)
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns not ready.
v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: metav1.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint unreachable:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns unreachable.
v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: metav1.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
templateGenerationStr := fmt.Sprint(generation) templateGenerationStr := fmt.Sprint(generation)
newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
template.ObjectMeta.Labels, template.ObjectMeta.Labels,
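Note: the AddOrUpdateTolerationInPodSpec calls above only need add-or-replace semantics. A simplified stand-in (plain structs and a hypothetical helper, not the vendored API) illustrates the idea:

package main

import "fmt"

// toleration is a stripped-down stand-in for v1.Toleration.
type toleration struct {
	Key, Operator, Effect string
}

// addOrUpdate approximates the helper used above: replace an existing
// toleration with the same key and effect, otherwise append a new one.
func addOrUpdate(list []toleration, t toleration) []toleration {
	for i := range list {
		if list[i].Key == t.Key && list[i].Effect == t.Effect {
			list[i] = t
			return list
		}
	}
	return append(list, t)
}

func main() {
	spec := []toleration{}
	spec = addOrUpdate(spec, toleration{Key: "notReady", Operator: "Exists", Effect: "NoExecute"})
	spec = addOrUpdate(spec, toleration{Key: "unreachable", Operator: "Exists", Effect: "NoExecute"})
	fmt.Println(spec)
}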

View file

@ -52,7 +52,11 @@ import (
const ( const (
// maxRetries is the number of times a deployment will be retried before it is dropped out of the queue. // maxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
maxRetries = 5 // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
// a deployment is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
) )
// controllerKind contains the schema.GroupVersionKind for this controller type. // controllerKind contains the schema.GroupVersionKind for this controller type.
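Note: the requeue delays quoted in the new comment follow from doubling a 5ms base delay on every retry. A minimal sketch (assuming an exponential back-off with that base, which is not shown in this hunk) reproduces the listed numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 5ms, 10ms, 20ms, ... up to roughly 82s for the 15th retry.
	base := 5 * time.Millisecond
	for retry := 0; retry < 15; retry++ {
		fmt.Printf("retry %2d: %v\n", retry+1, base<<uint(retry))
	}
}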

View file

@ -31,9 +31,9 @@ import (
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
) )
func maxSurge(val int) *intstr.IntOrString { func intOrStrP(val int) *intstr.IntOrString {
surge := intstr.FromInt(val) intOrStr := intstr.FromInt(val)
return &surge return &intOrStr
} }
func TestScale(t *testing.T) { func TestScale(t *testing.T) {
@ -218,8 +218,8 @@ func TestScale(t *testing.T) {
}, },
{ {
name: "deployment with surge pods", name: "deployment with surge pods",
deployment: newDeployment("foo", 20, nil, maxSurge(2), nil, nil), deployment: newDeployment("foo", 20, nil, intOrStrP(2), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil), oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil),
newRS: rs("foo-v2", 6, nil, newTimestamp), newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
@ -229,8 +229,8 @@ func TestScale(t *testing.T) {
}, },
{ {
name: "change both surge and size", name: "change both surge and size",
deployment: newDeployment("foo", 50, nil, maxSurge(6), nil, nil), deployment: newDeployment("foo", 50, nil, intOrStrP(6), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil), oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil),
newRS: rs("foo-v2", 5, nil, newTimestamp), newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
@ -249,6 +249,21 @@ func TestScale(t *testing.T) {
expectedNew: nil, expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
}, },
{
name: "saturated but broken new replica set does not affect old pods",
deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
newRS: func() *extensions.ReplicaSet {
rs := rs("foo-v2", 2, nil, newTimestamp)
rs.Status.AvailableReplicas = 0
return rs
}(),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
},
} }
for _, test := range tests { for _, test := range tests {

View file

@ -1059,7 +1059,8 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size. // IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired // Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that. // replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
if rs == nil { if rs == nil {
return false return false
@ -1069,7 +1070,9 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
if err != nil { if err != nil {
return false return false
} }
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas) return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) &&
int32(desired) == *(deployment.Spec.Replicas) &&
rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
} }
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
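Note: the strengthened IsSaturated check requires three counts to agree. A self-contained sketch with plain integers (hypothetical names, not the vendored types) shows why the "saturated but broken new replica set" test case above no longer counts as saturated:

package main

import "fmt"

// saturated mirrors the new check: the replica set must be scaled to the
// deployment's size, annotated with the same desired count, and have every
// replica available.
func saturated(specReplicas, desiredAnnotation, availableReplicas, deploymentReplicas int32) bool {
	return specReplicas == deploymentReplicas &&
		desiredAnnotation == deploymentReplicas &&
		availableReplicas == deploymentReplicas
}

func main() {
	// Scaled and annotated correctly but with no available pods: not saturated,
	// so old replicas are left alone, as in the new test case.
	fmt.Println(saturated(2, 2, 0, 2)) // false
	fmt.Println(saturated(2, 2, 2, 2)) // true
}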

View file

@ -36,8 +36,9 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/node" nodepkg "k8s.io/kubernetes/pkg/util/node"
utilversion "k8s.io/kubernetes/pkg/util/version" utilversion "k8s.io/kubernetes/pkg/util/version"
"github.com/golang/glog" "github.com/golang/glog"
@ -102,12 +103,12 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver, // setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one. // and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) { func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == node.NodeUnreachablePodReason { if pod.Status.Reason == nodepkg.NodeUnreachablePodReason {
return pod, nil return pod, nil
} }
pod.Status.Reason = node.NodeUnreachablePodReason pod.Status.Reason = nodepkg.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name) pod.Status.Message = fmt.Sprintf(nodepkg.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *v1.Pod var updatedPod *v1.Pod
var err error var err error
@ -286,3 +287,32 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_st
// and event is recorded or neither should happen, see issue #6055. // and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status) recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
} }
// swapNodeControllerTaint adds taintToAdd to the node and removes taintToRemove from it.
// It returns true on success and false otherwise.
func swapNodeControllerTaint(kubeClient clientset.Interface, taintToAdd, taintToRemove *v1.Taint, node *v1.Node) bool {
taintToAdd.TimeAdded = metav1.Now()
err := controller.AddOrUpdateTaintOnNode(kubeClient, node.Name, taintToAdd)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to taint %v unresponsive Node %q: %v",
taintToAdd.Key,
node.Name,
err))
return false
}
glog.V(4).Infof("Added %v Taint to Node %v", taintToAdd, node.Name)
err = controller.RemoveTaintOffNode(kubeClient, node.Name, taintToRemove, node)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to remove %v unneeded taint from unresponsive Node %q: %v",
taintToRemove.Key,
node.Name,
err))
return false
}
glog.V(4).Infof("Made sure that Node %v has no %v Taint", node.Name, taintToRemove)
return true
}

View file

@ -478,6 +478,74 @@ func NewNodeController(
return nc, nil return nc, nil
} }
func (nc *NodeController) doEvictionPass() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zonePodEvictor {
// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
} else {
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
nodeUid, _ := value.UID.(string)
remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
return false, 0
}
if remaining {
glog.Infof("Pods awaiting deletion due to NodeController eviction")
}
return true, 0
})
}
}
func (nc *NodeController) doTaintingPass() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zoneNotReadyOrUnreachableTainer {
// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
return true, 0
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
// retry in 50 milliseconds
return false, 50 * time.Millisecond
} else {
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
taintToAdd := v1.Taint{}
oppositeTaint := v1.Taint{}
if condition.Status == v1.ConditionFalse {
taintToAdd = *NotReadyTaintTemplate
oppositeTaint = *UnreachableTaintTemplate
} else if condition.Status == v1.ConditionUnknown {
taintToAdd = *UnreachableTaintTemplate
oppositeTaint = *NotReadyTaintTemplate
} else {
// It seems that the Node is ready again, so there's no need to taint it.
glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
return true, 0
}
return swapNodeControllerTaint(nc.kubeClient, &taintToAdd, &oppositeTaint, node), 0
})
}
}
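Note: the taint selection above keeps the two NoExecute taints mutually exclusive, mirroring the single Ready condition. A small decision-table sketch (template names only, hypothetical helper) summarizes the mapping:

package main

import "fmt"

// pickTaints mirrors the decision above: "not ready" and "unreachable" are
// never applied together. status is the Node's Ready condition status.
func pickTaints(status string) (add, remove string, taint bool) {
	switch status {
	case "False":
		return "NotReadyTaintTemplate", "UnreachableTaintTemplate", true
	case "Unknown":
		return "UnreachableTaintTemplate", "NotReadyTaintTemplate", true
	default:
		return "", "", false // ready again, nothing to taint
	}
}

func main() {
	fmt.Println(pickTaints("Unknown"))
}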
// Run starts an asynchronous loop that monitors the status of cluster nodes. // Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() { func (nc *NodeController) Run() {
go func() { go func() {
@ -502,101 +570,12 @@ func (nc *NodeController) Run() {
if nc.useTaintBasedEvictions { if nc.useTaintBasedEvictions {
// Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
// taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints. // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
go wait.Until(func() { go wait.Until(nc.doTaintingPass, nodeEvictionPeriod, wait.NeverStop)
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zoneNotReadyOrUnreachableTainer {
// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
return true, 0
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
// retry in 50 millisecond
return false, 50 * time.Millisecond
} else {
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
taintToAdd := v1.Taint{}
oppositeTaint := v1.Taint{}
if condition.Status == v1.ConditionFalse {
taintToAdd = *NotReadyTaintTemplate
oppositeTaint = *UnreachableTaintTemplate
} else if condition.Status == v1.ConditionUnknown {
taintToAdd = *UnreachableTaintTemplate
oppositeTaint = *NotReadyTaintTemplate
} else {
// It seems that the Node is ready again, so there's no need to taint it.
glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
return true, 0
}
taintToAdd.TimeAdded = metav1.Now()
err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, value.Value, &taintToAdd)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to taint %v unresponsive Node %q: %v",
taintToAdd.Key,
value.Value,
err))
return false, 0
} else {
glog.V(4).Info("Added %v Taint to Node %v", taintToAdd, value.Value)
}
err = controller.RemoveTaintOffNode(nc.kubeClient, value.Value, &oppositeTaint, node)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to remove %v unneeded taint from unresponsive Node %q: %v",
oppositeTaint.Key,
value.Value,
err))
return false, 0
} else {
glog.V(4).Info("Made sure that Node %v has no %v Taint", value.Value, oppositeTaint)
}
return true, 0
})
}
}, nodeEvictionPeriod, wait.NeverStop)
} else { } else {
// Managing eviction of nodes: // Managing eviction of nodes:
// When we delete pods off a node, if the node was not empty at the time we then // When we delete pods off a node, if the node was not empty at the time we then
// queue an eviction watcher. If we hit an error, retry deletion. // queue an eviction watcher. If we hit an error, retry deletion.
go wait.Until(func() { go wait.Until(nc.doEvictionPass, nodeEvictionPeriod, wait.NeverStop)
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zonePodEvictor {
// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
} else {
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
nodeUid, _ := value.UID.(string)
remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
return false, 0
}
if remaining {
glog.Infof("Pods awaiting deletion due to NodeController eviction")
}
return true, 0
})
}
}, nodeEvictionPeriod, wait.NeverStop)
} }
}() }()
} }
@ -685,7 +664,13 @@ func (nc *NodeController) monitorNodeStatus() error {
// Check eviction timeout against decisionTimestamp // Check eviction timeout against decisionTimestamp
if observedReadyCondition.Status == v1.ConditionFalse { if observedReadyCondition.Status == v1.ConditionFalse {
if nc.useTaintBasedEvictions { if nc.useTaintBasedEvictions {
if nc.markNodeForTainting(node) { // We want to update the taint straight away if Node is already tainted with the UnreachableTaint
if v1.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
taintToAdd := *NotReadyTaintTemplate
if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, UnreachableTaintTemplate, node) {
glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
}
} else if nc.markNodeForTainting(node) {
glog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.", glog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.",
node.Name, node.Name,
decisionTimestamp, decisionTimestamp,
@ -706,7 +691,13 @@ func (nc *NodeController) monitorNodeStatus() error {
} }
if observedReadyCondition.Status == v1.ConditionUnknown { if observedReadyCondition.Status == v1.ConditionUnknown {
if nc.useTaintBasedEvictions { if nc.useTaintBasedEvictions {
if nc.markNodeForTainting(node) { // We want to update the taint straight away if Node is already tainted with the UnreachableTaint
if v1.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
taintToAdd := *UnreachableTaintTemplate
if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, NotReadyTaintTemplate, node) {
glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
}
} else if nc.markNodeForTainting(node) {
glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.", glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.",
node.Name, node.Name,
decisionTimestamp, decisionTimestamp,

View file

@ -74,7 +74,9 @@ func NewNodeControllerFromClient(
clusterCIDR *net.IPNet, clusterCIDR *net.IPNet,
serviceCIDR *net.IPNet, serviceCIDR *net.IPNet,
nodeCIDRMaskSize int, nodeCIDRMaskSize int,
allocateNodeCIDRs bool) (*nodeController, error) { allocateNodeCIDRs bool,
useTaints bool,
) (*nodeController, error) {
factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
@ -99,8 +101,8 @@ func NewNodeControllerFromClient(
serviceCIDR, serviceCIDR,
nodeCIDRMaskSize, nodeCIDRMaskSize,
allocateNodeCIDRs, allocateNodeCIDRs,
false, useTaints,
false, useTaints,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -549,7 +551,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
for _, item := range table { for _, item := range table {
nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler,
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.now = func() metav1.Time { return fakeNow } nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder() nodeController.recorder = testutil.NewFakeRecorder()
for _, ds := range item.daemonSets { for _, ds := range item.daemonSets {
@ -698,7 +700,7 @@ func TestPodStatusChange(t *testing.T) {
for _, item := range table { for _, item := range table {
nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler,
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.now = func() metav1.Time { return fakeNow } nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder() nodeController.recorder = testutil.NewFakeRecorder()
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
@ -1215,7 +1217,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
} }
nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler,
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod, evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.now = func() metav1.Time { return fakeNow } nodeController.now = func() metav1.Time { return fakeNow }
nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 { nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 {
return testRateLimiterQPS return testRateLimiterQPS
@ -1310,7 +1312,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
nodeController, _ := NewNodeControllerFromClient(nil, fnh, 10*time.Minute, nodeController, _ := NewNodeControllerFromClient(nil, fnh, 10*time.Minute,
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
testNodeMonitorPeriod, nil, nil, 0, false) testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.cloud = &fakecloud.FakeCloud{} nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) } nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.recorder = testutil.NewFakeRecorder() nodeController.recorder = testutil.NewFakeRecorder()
@ -1579,7 +1581,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
for i, item := range table { for i, item := range table {
nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.now = func() metav1.Time { return fakeNow } nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder() nodeController.recorder = testutil.NewFakeRecorder()
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
@ -1813,7 +1815,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
for i, item := range table { for i, item := range table {
nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.now = func() metav1.Time { return fakeNow } nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder() nodeController.recorder = testutil.NewFakeRecorder()
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
@ -1845,6 +1847,146 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
} }
} }
func TestSwapUnreachableNotReadyTaints(t *testing.T) {
fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
metav1.LabelZoneRegion: "region1",
metav1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
// we need a second healthy node in tests. Because of how the tests are written we need to update
// the status of this Node.
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
metav1.LabelZoneRegion: "region1",
metav1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
timeToPass := evictionTimeout
newNodeStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
// Node status has just been updated, and is NotReady for 10min.
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 9, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
healthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 10, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
originalTaint := UnreachableTaintTemplate
updatedTaint := NotReadyTaintTemplate
nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler,
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doTaintingPass()
node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node0...")
return
}
node1, err := fakeNodeHandler.Get("node1", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node1...")
return
}
if originalTaint != nil && !v1.TaintExists(node0.Spec.Taints, originalTaint) {
t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints)
}
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(timeToPass)} }
node0.Status = newNodeStatus
node1.Status = healthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(node0)
if err != nil {
t.Errorf(err.Error())
return
}
_, err = fakeNodeHandler.UpdateStatus(node1)
if err != nil {
t.Errorf(err.Error())
return
}
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doTaintingPass()
node0, err = fakeNodeHandler.Get("node0", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node0...")
return
}
if updatedTaint != nil {
if !v1.TaintExists(node0.Spec.Taints, updatedTaint) {
t.Errorf("Can't find taint %v in %v", updatedTaint, node0.Spec.Taints)
}
}
}
func TestNodeEventGeneration(t *testing.T) { func TestNodeEventGeneration(t *testing.T) {
fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC) fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC)
fakeNodeHandler := &testutil.FakeNodeHandler{ fakeNodeHandler := &testutil.FakeNodeHandler{
@ -1876,7 +2018,7 @@ func TestNodeEventGeneration(t *testing.T) {
nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute, nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute,
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
testNodeMonitorPeriod, nil, nil, 0, false) testNodeMonitorPeriod, nil, nil, 0, false, false)
nodeController.cloud = &fakecloud.FakeCloud{} nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) { nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil return false, nil
@ -1987,7 +2129,7 @@ func TestCheckPod(t *testing.T) {
}, },
} }
nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false) nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false, false)
nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "new", Name: "new",

View file

@ -17,6 +17,7 @@ go_library(
"//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/util/node:go_default_library", "//pkg/util/node:go_default_library",
"//vendor:github.com/evanphx/json-patch",
"//vendor:github.com/golang/glog", "//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/api/resource",
@ -24,6 +25,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets", "//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/strategicpatch",
"//vendor:k8s.io/apimachinery/pkg/watch", "//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/pkg/api/v1", "//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/util/clock", "//vendor:k8s.io/client-go/util/clock",

View file

@ -17,6 +17,7 @@ limitations under the License.
package testutil package testutil
import ( import (
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"sync" "sync"
@ -28,16 +29,19 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
clientv1 "k8s.io/client-go/pkg/api/v1" clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/util/clock" "k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
"github.com/evanphx/json-patch"
"github.com/golang/glog" "github.com/golang/glog"
) )
@ -189,6 +193,7 @@ func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.RequestCount++ m.RequestCount++
m.lock.Unlock() m.lock.Unlock()
}() }()
nodeCopy := *node nodeCopy := *node
for i, updateNode := range m.UpdatedNodes { for i, updateNode := range m.UpdatedNodes {
if updateNode.Name == nodeCopy.Name { if updateNode.Name == nodeCopy.Name {
@ -207,6 +212,35 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.RequestCount++ m.RequestCount++
m.lock.Unlock() m.lock.Unlock()
}() }()
var origNodeCopy v1.Node
found := false
for i := range m.Existing {
if m.Existing[i].Name == node.Name {
origNodeCopy = *m.Existing[i]
found = true
}
}
updatedNodeIndex := -1
for i := range m.UpdatedNodes {
if m.UpdatedNodes[i].Name == node.Name {
origNodeCopy = *m.UpdatedNodes[i]
updatedNodeIndex = i
found = true
}
}
if !found {
return nil, fmt.Errorf("Not found node %v", node)
}
origNodeCopy.Status = node.Status
if updatedNodeIndex < 0 {
m.UpdatedNodes = append(m.UpdatedNodes, &origNodeCopy)
} else {
m.UpdatedNodes[updatedNodeIndex] = &origNodeCopy
}
nodeCopy := *node nodeCopy := *node
m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy) m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
return node, nil return node, nil
@ -225,7 +259,76 @@ func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error
// Patch patches a Node in the fake store. // Patch patches a Node in the fake store.
func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) { func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
var nodeCopy v1.Node
for i := range m.Existing {
if m.Existing[i].Name == name {
nodeCopy = *m.Existing[i]
}
}
updatedNodeIndex := -1
for i := range m.UpdatedNodes {
if m.UpdatedNodes[i].Name == name {
nodeCopy = *m.UpdatedNodes[i]
updatedNodeIndex = i
}
}
originalObjJS, err := json.Marshal(nodeCopy)
if err != nil {
glog.Errorf("Failed to marshal %v", nodeCopy)
return nil, nil
}
var originalNode v1.Node
if err = json.Unmarshal(originalObjJS, &originalNode); err != nil {
glog.Errorf("Failed to unmarshall original object: %v", err)
return nil, nil
}
var patchedObjJS []byte
switch pt {
case types.JSONPatchType:
patchObj, err := jsonpatch.DecodePatch(data)
if err != nil {
glog.Error(err.Error())
return nil, nil
}
if patchedObjJS, err = patchObj.Apply(originalObjJS); err != nil {
glog.Error(err.Error())
return nil, nil
}
case types.MergePatchType:
if patchedObjJS, err = jsonpatch.MergePatch(originalObjJS, data); err != nil {
glog.Error(err.Error())
return nil, nil
}
case types.StrategicMergePatchType:
if patchedObjJS, err = strategicpatch.StrategicMergePatch(originalObjJS, data, originalNode); err != nil {
glog.Error(err.Error())
return nil, nil
}
default:
glog.Errorf("unknown Content-Type header for patch: %v", pt)
return nil, nil
}
var updatedNode v1.Node
if err = json.Unmarshal(patchedObjJS, &updatedNode); err != nil {
glog.Errorf("Failed to unmarshall patched object: %v", err)
return nil, nil
}
if updatedNodeIndex < 0 {
m.UpdatedNodes = append(m.UpdatedNodes, &updatedNode)
} else {
m.UpdatedNodes[updatedNodeIndex] = &updatedNode
}
return &updatedNode, nil
} }
// FakeRecorder is used as a fake during testing. // FakeRecorder is used as a fake during testing.
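Note: the fake Patch implementation above delegates the actual patching to github.com/evanphx/json-patch and the strategic-merge helper. A minimal sketch of the merge-patch case (standalone, illustrative input only):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"spec":{"taints":[]},"status":{"phase":"Running"}}`)
	patch := []byte(`{"spec":{"unschedulable":true}}`)

	// MergePatch applies an RFC 7386 merge patch, which is what the fake
	// handler does for types.MergePatchType.
	patched, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patched))
}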

View file

@ -16,6 +16,7 @@ go_library(
], ],
tags = ["automanaged"], tags = ["automanaged"],
deps = [ deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/api:go_default_library", "//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library", "//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library",

View file

@ -34,6 +34,7 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
@ -617,10 +618,16 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
return func(node *v1.Node) bool { return func(node *v1.Node) bool {
// We add the master to the node list, but it's unschedulable. So we use this to filter // We add the master to the node list, but it's unschedulable. So we use this to filter
// the master. // the master.
// TODO: Use a node annotation to indicate the master
if node.Spec.Unschedulable { if node.Spec.Unschedulable {
return false return false
} }
// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
// Recognize nodes labeled as master, and filter them also, as we were doing previously.
if _, hasMasterRoleLabel := node.Labels[constants.LabelNodeRoleMaster]; hasMasterRoleLabel {
return false
}
// If we have no info, don't accept // If we have no info, don't accept
if len(node.Status.Conditions) == 0 { if len(node.Status.Conditions) == 0 {
return false return false

View file

@ -132,6 +132,13 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
if !isCreated(replicas[i]) { if !isCreated(replicas[i]) {
return ssc.podControl.CreateStatefulPod(set, replicas[i]) return ssc.podControl.CreateStatefulPod(set, replicas[i])
} }
// If we find a Pod that is currently terminating, we must wait until graceful deletion
// completes before we continue to make progress.
if isTerminating(replicas[i]) {
glog.V(2).Infof("StatefulSet %s is waiting for Pod %s to Terminate",
set.Name, replicas[i].Name)
return nil
}
// If we have a Pod that has been created but is not running and ready we can not make progress. // If we have a Pod that has been created but is not running and ready we can not make progress.
// We must ensure that, for each Pod we create, all of its predecessors, with respect to its // We must ensure that, for each Pod we create, all of its predecessors, with respect to its
// ordinal, are Running and Ready. // ordinal, are Running and Ready.

View file

@ -403,6 +403,72 @@ func TestDefaultStatefulSetControlUpdatePodFailure(t *testing.T) {
} }
} }
func TestDefaultStatefulSetControlBlocksOnTerminating(t *testing.T) {
set := newStatefulSet(3)
client := fake.NewSimpleClientset(set)
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets())
ssc := NewDefaultStatefulSetControl(spc)
spc.SetUpdateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0)
stop := make(chan struct{})
defer close(stop)
informerFactory.Start(stop)
cache.WaitForCacheSync(
stop,
informerFactory.Apps().V1beta1().StatefulSets().Informer().HasSynced,
informerFactory.Core().V1().Pods().Informer().HasSynced,
)
if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var err error
set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
if err != nil {
t.Fatalf("Error getting updated StatefulSet: %v", err)
}
if set.Status.Replicas != 3 {
t.Fatal("Failed to scale StatefulSet to 3 replicas")
}
// scale the set and add a terminated pod
*set.Spec.Replicas = 4
pods, err := spc.addTerminatingPod(set, 2)
if err != nil {
t.Fatal(err)
}
if err := ssc.UpdateStatefulSet(set, pods); err != nil {
t.Fatal(err)
}
pods, err = spc.podsLister.List(labels.Everything())
if err != nil {
t.Fatalf("Error listing pods: %v", err)
}
if len(pods) != 3 {
t.Fatalf("Expected 3 pods, got %d", len(pods))
}
sort.Sort(ascendingOrdinal(pods))
spc.DeleteStatefulPod(set, pods[2])
pods, err = spc.podsLister.List(labels.Everything())
if err != nil {
t.Fatalf("Error listing pods: %v", err)
}
if len(pods) != 2 {
t.Fatalf("Expected 3 pods, got %d", len(pods))
}
if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
if err != nil {
t.Fatalf("Error getting updated StatefulSet: %v", err)
}
if set.Status.Replicas != 4 {
t.Fatal("Failed to scale StatefulSet to 3 replicas")
}
}
func TestDefaultStatefulSetControlUpdateSetStatusFailure(t *testing.T) { func TestDefaultStatefulSetControlUpdateSetStatusFailure(t *testing.T) {
set := newStatefulSet(3) set := newStatefulSet(3)
client := fake.NewSimpleClientset(set) client := fake.NewSimpleClientset(set)
@ -690,7 +756,7 @@ func (spc *fakeStatefulPodControl) setPodInitStatus(set *apps.StatefulSet, ordin
return spc.podsLister.Pods(set.Namespace).List(selector) return spc.podsLister.Pods(set.Namespace).List(selector)
} }
func (spc *fakeStatefulPodControl) addTerminatedPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) { func (spc *fakeStatefulPodControl) addTerminatingPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) {
pod := newStatefulSetPod(set, ordinal) pod := newStatefulSetPod(set, ordinal)
pod.Status.Phase = v1.PodRunning pod.Status.Phase = v1.PodRunning
deleted := metav1.NewTime(time.Now()) deleted := metav1.NewTime(time.Now())
@ -906,7 +972,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn
if err != nil { if err != nil {
return err return err
} }
if pods, err = spc.addTerminatedPod(set, ordinal); err != nil { if pods, err = spc.addTerminatingPod(set, ordinal); err != nil {
return err return err
} }
if err = ssc.UpdateStatefulSet(set, pods); err != nil { if err = ssc.UpdateStatefulSet(set, pods); err != nil {

View file

@ -91,11 +91,11 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
if set.Status.Replicas != 3 { if set.Status.Replicas != 3 {
t.Error("Falied to scale statefulset to 3 replicas") t.Error("Falied to scale statefulset to 3 replicas")
} }
pods, err := spc.addTerminatedPod(set, 3) pods, err := spc.addTerminatingPod(set, 3)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
pods, err = spc.addTerminatedPod(set, 4) pods, err = spc.addTerminatingPod(set, 4)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -669,7 +669,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr
spc.setsIndexer.Add(set) spc.setsIndexer.Add(set)
ssc.enqueueStatefulSet(set) ssc.enqueueStatefulSet(set)
fakeWorker(ssc) fakeWorker(ssc)
pods, err = spc.addTerminatedPod(set, ord) pods, err = spc.addTerminatingPod(set, ord)
pod = getPodAtOrdinal(pods, ord) pod = getPodAtOrdinal(pods, ord)
ssc.updatePod(&prev, pod) ssc.updatePod(&prev, pod)
fakeWorker(ssc) fakeWorker(ssc)
@ -679,7 +679,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr
for set.Status.Replicas > *set.Spec.Replicas { for set.Status.Replicas > *set.Spec.Replicas {
pods, err = spc.podsLister.Pods(set.Namespace).List(selector) pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
ord := len(pods) ord := len(pods)
pods, err = spc.addTerminatedPod(set, ord) pods, err = spc.addTerminatingPod(set, ord)
pod = getPodAtOrdinal(pods, ord) pod = getPodAtOrdinal(pods, ord)
ssc.updatePod(&prev, pod) ssc.updatePod(&prev, pod)
fakeWorker(ssc) fakeWorker(ssc)

View file

@ -221,14 +221,14 @@ func isFailed(pod *v1.Pod) bool {
return pod.Status.Phase == v1.PodFailed return pod.Status.Phase == v1.PodFailed
} }
// isTerminated returns true if pod's deletion Timestamp has been set // isTerminating returns true if pod's DeletionTimestamp has been set
func isTerminated(pod *v1.Pod) bool { func isTerminating(pod *v1.Pod) bool {
return pod.DeletionTimestamp != nil return pod.DeletionTimestamp != nil
} }
// isHealthy returns true if pod is running and ready and has not been terminated // isHealthy returns true if pod is running and ready and has not been terminated
func isHealthy(pod *v1.Pod) bool { func isHealthy(pod *v1.Pod) bool {
return isRunningAndReady(pod) && !isTerminated(pod) return isRunningAndReady(pod) && !isTerminating(pod)
} }
// newControllerRef returns an ControllerRef pointing to a given StatefulSet. // newControllerRef returns an ControllerRef pointing to a given StatefulSet.

View file

@ -12227,7 +12227,7 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope
}, },
"concurrencyPolicy": { "concurrencyPolicy": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
Type: []string{"string"}, Type: []string{"string"},
Format: "", Format: "",
}, },

View file

@ -75,14 +75,17 @@ func (s *BuiltInAuthorizationOptions) AddFlags(fs *pflag.FlagSet) {
} }
func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig { func (s *BuiltInAuthorizationOptions) Modes() []string {
modes := []string{} modes := []string{}
if len(s.Mode) > 0 { if len(s.Mode) > 0 {
modes = strings.Split(s.Mode, ",") modes = strings.Split(s.Mode, ",")
} }
return modes
}
func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig {
return authorizer.AuthorizationConfig{ return authorizer.AuthorizationConfig{
AuthorizationModes: modes, AuthorizationModes: s.Modes(),
PolicyFile: s.PolicyFile, PolicyFile: s.PolicyFile,
WebhookConfigFile: s.WebhookConfigFile, WebhookConfigFile: s.WebhookConfigFile,
WebhookCacheAuthorizedTTL: s.WebhookCacheAuthorizedTTL, WebhookCacheAuthorizedTTL: s.WebhookCacheAuthorizedTTL,

View file

@ -85,7 +85,7 @@ func (m *containerManager) doWork() {
glog.Errorf("Unable to get docker version: %v", err) glog.Errorf("Unable to get docker version: %v", err)
return return
} }
version, err := utilversion.ParseSemantic(v.Version) version, err := utilversion.ParseGeneric(v.Version)
if err != nil { if err != nil {
glog.Errorf("Unable to parse docker version %q: %v", v.Version, err) glog.Errorf("Unable to parse docker version %q: %v", v.Version, err)
return return

View file

@ -388,6 +388,9 @@ func (ds *dockerService) getDockerAPIVersion() (*semver.Version, error) {
} else { } else {
dv, err = ds.getDockerVersion() dv, err = ds.getDockerVersion()
} }
if err != nil {
return nil, err
}
apiVersion, err := semver.Parse(dv.APIVersion) apiVersion, err := semver.Parse(dv.APIVersion)
if err != nil { if err != nil {

View file

@ -163,7 +163,6 @@ func modifyHostNetworkOptionForContainer(hostNetwork bool, sandboxID string, hc
hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode) hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode) hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
hc.UTSMode = "" hc.UTSMode = ""
hc.PidMode = ""
if hostNetwork { if hostNetwork {
hc.UTSMode = namespaceModeHost hc.UTSMode = namespaceModeHost

View file

@ -306,6 +306,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
expected: &dockercontainer.HostConfig{ expected: &dockercontainer.HostConfig{
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode), NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
IpcMode: dockercontainer.IpcMode(sandboxNSMode), IpcMode: dockercontainer.IpcMode(sandboxNSMode),
PidMode: namespaceModeHost,
}, },
}, },
} }

View file

@ -371,23 +371,22 @@ func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceNam
for _, nodeReclaimFunc := range nodeReclaimFuncs { for _, nodeReclaimFunc := range nodeReclaimFuncs {
// attempt to reclaim the pressured resource. // attempt to reclaim the pressured resource.
reclaimed, err := nodeReclaimFunc() reclaimed, err := nodeReclaimFunc()
if err == nil { if err != nil {
// update our local observations based on the amount reported to have been reclaimed. glog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err)
// note: this is optimistic, other things could have been still consuming the pressured resource in the interim. }
signal := resourceToSignal[resourceToReclaim] // update our local observations based on the amount reported to have been reclaimed.
value, ok := observations[signal] // note: this is optimistic, other things could have been still consuming the pressured resource in the interim.
if !ok { signal := resourceToSignal[resourceToReclaim]
glog.Errorf("eviction manager: unable to find value associated with signal %v", signal) value, ok := observations[signal]
continue if !ok {
} glog.Errorf("eviction manager: unable to find value associated with signal %v", signal)
value.available.Add(*reclaimed) continue
}
value.available.Add(*reclaimed)
// evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals // evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals
if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 { if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 {
return true return true
}
} else {
glog.Errorf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err)
} }
} }
return false return false

View file

@ -995,13 +995,10 @@ func deleteImages(imageGC ImageGC, reportBytesFreed bool) nodeReclaimFunc {
return func() (*resource.Quantity, error) { return func() (*resource.Quantity, error) {
glog.Infof("eviction manager: attempting to delete unused images") glog.Infof("eviction manager: attempting to delete unused images")
bytesFreed, err := imageGC.DeleteUnusedImages() bytesFreed, err := imageGC.DeleteUnusedImages()
if err != nil {
return nil, err
}
reclaimed := int64(0) reclaimed := int64(0)
if reportBytesFreed { if reportBytesFreed {
reclaimed = bytesFreed reclaimed = bytesFreed
} }
return resource.NewQuantity(reclaimed, resource.BinarySI), nil return resource.NewQuantity(reclaimed, resource.BinarySI), err
} }
} }

View file

@ -76,7 +76,8 @@ type NodeProvider interface {
// ImageGC is responsible for performing garbage collection of unused images. // ImageGC is responsible for performing garbage collection of unused images.
type ImageGC interface { type ImageGC interface {
// DeleteUnusedImages deletes unused images and returns the number of bytes freed, or an error. // DeleteUnusedImages deletes unused images and returns the number of bytes freed, and an error.
// This returns the bytes freed even if an error is returned.
DeleteUnusedImages() (int64, error) DeleteUnusedImages() (int64, error)
} }
@ -118,6 +119,8 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time
type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time
// nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods. // nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods.
// Returns the quantity of resources reclaimed and an error, if applicable.
// nodeReclaimFunc returns the resources reclaimed even if an error occurs.
type nodeReclaimFunc func() (*resource.Quantity, error) type nodeReclaimFunc func() (*resource.Quantity, error)
// nodeReclaimFuncs is an ordered list of nodeReclaimFunc // nodeReclaimFuncs is an ordered list of nodeReclaimFunc
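Note: the contract change above means callers must consume the reclaimed quantity even when an error is returned. A small sketch of that pattern (a hypothetical stand-in for DeleteUnusedImages, plain int64 instead of resource.Quantity):

package main

import (
	"errors"
	"fmt"
)

// deleteUnusedImages stands in for ImageGC.DeleteUnusedImages: it can free
// some bytes and still return an error for the images it failed to remove.
func deleteUnusedImages() (int64, error) {
	return 1 << 20, errors.New("failed to remove image busybox: in use")
}

func main() {
	freed, err := deleteUnusedImages()
	if err != nil {
		// Log and keep going: the partially reclaimed bytes still count
		// toward relieving pressure, as in the eviction manager above.
		fmt.Println("warning:", err)
	}
	fmt.Printf("reclaimed %d bytes\n", freed)
}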

View file

@ -2053,7 +2053,7 @@ func (kl *Kubelet) updateRuntimeUp() {
} }
// Only check specific conditions when runtime integration type is cri, // Only check specific conditions when runtime integration type is cri,
// because the old integration doesn't populate any runtime condition. // because the old integration doesn't populate any runtime condition.
if kl.kubeletConfiguration.EnableCRI { if kl.kubeletConfiguration.EnableCRI && kl.kubeletConfiguration.ContainerRuntime != "rkt" {
if s == nil { if s == nil {
glog.Errorf("Container runtime status is nil") glog.Errorf("Container runtime status is nil")
return return

View file

@@ -55,6 +55,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/status"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/term"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
@@ -135,7 +136,32 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
			return nil, err
		}
		if mount.SubPath != "" {
+			fileinfo, err := os.Lstat(hostPath)
+			if err != nil {
+				return nil, err
+			}
+			perm := fileinfo.Mode()
			hostPath = filepath.Join(hostPath, mount.SubPath)
+			if subPathExists, err := util.FileExists(hostPath); err != nil {
+				glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
+			} else if !subPathExists {
+				// Create the sub path now because if it's auto-created later when referenced, it may have an
+				// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
+				// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
+				// later auto-create it with the incorrect mode 0750
+				if err := os.MkdirAll(hostPath, perm); err != nil {
+					glog.Errorf("failed to mkdir:%s", hostPath)
+					return nil, err
+				}
+				// chmod the sub path because umask may have prevented us from making the sub path with the same
+				// permissions as the mounter path
+				if err := os.Chmod(hostPath, perm); err != nil {
+					return nil, err
+				}
+			}
		}
		// Docker Volume Mounts fail on Windows if it is not of the form C:/
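A standalone sketch of the same subPath idea using only the standard library: create the directory with the volume's own mode, then chmod it because the process umask can strip bits during MkdirAll. ensureSubPath is a hypothetical helper, not the kubelet code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensureSubPath creates subPath under volumePath with the same mode as the
// volume directory, then chmods it so umask cannot change the result.
func ensureSubPath(volumePath, subPath string) (string, error) {
	info, err := os.Lstat(volumePath)
	if err != nil {
		return "", err
	}
	perm := info.Mode().Perm()

	hostPath := filepath.Join(volumePath, subPath)
	if _, err := os.Stat(hostPath); os.IsNotExist(err) {
		if err := os.MkdirAll(hostPath, perm); err != nil {
			return "", err
		}
		if err := os.Chmod(hostPath, perm); err != nil {
			return "", err
		}
	} else if err != nil {
		return "", err
	}
	return hostPath, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "vol")
	defer os.RemoveAll(dir)
	p, err := ensureSubPath(dir, "data/cache")
	fmt.Println(p, err)
}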


@@ -357,10 +357,10 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag
// readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented
// by path. It reads up to max log lines.
-func readLastStringFromContainerLogs(path string) string {
+func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string {
	value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
	buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
-	if err := ReadLogs(path, &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
+	if err := m.ReadLogs(path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
		return fmt.Sprintf("Error on reading termination message from logs: %v", err)
	}
	return buf.String()
@@ -414,7 +414,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
	tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
	if checkLogs {
		path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount)
-		tMessage = readLastStringFromContainerLogs(path)
+		tMessage = m.readLastStringFromContainerLogs(path)
	}
	// Use the termination message written by the application is not empty
	if len(tMessage) != 0 {
@@ -688,7 +688,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
	labeledInfo := getContainerInfoFromLabels(status.Labels)
	annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
	path := buildFullContainerLogsPath(pod.UID, labeledInfo.ContainerName, annotatedInfo.RestartCount)
-	return ReadLogs(path, logOptions, stdout, stderr)
+	return m.ReadLogs(path, containerID.ID, logOptions, stdout, stderr)
}
// GetExec gets the endpoint the runtime will serve the exec request from.
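A sketch of taking only the tail of a log file through a fixed-size circular buffer, assuming the github.com/armon/circbuf package that the vendored code also uses; lastLogBytes and the log path are illustrative:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/armon/circbuf"
)

// lastLogBytes copies a log file through a fixed-size circular buffer so that
// only the final maxBytes survive, similar to how a termination message is
// taken from the tail of the container log.
func lastLogBytes(path string, maxBytes int64) (string, error) {
	buf, err := circbuf.NewBuffer(maxBytes)
	if err != nil {
		return "", err
	}
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(buf, f); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	msg, err := lastLogBytes("/var/log/containers/example.log", 4096)
	fmt.Println(msg, err)
}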


@@ -32,6 +32,7 @@ import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
	"k8s.io/kubernetes/pkg/util/tail"
)
@@ -54,6 +55,11 @@ const (
	timeFormat = time.RFC3339Nano
	// blockSize is the block size used in tail.
	blockSize = 1024
+	// stateCheckPeriod is the period to check container state while following
+	// the container log. Kubelet should not keep following the log when the
+	// container is not running.
+	stateCheckPeriod = 5 * time.Second
)
var (
@@ -110,7 +116,9 @@ func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions {
}
// ReadLogs read the container log and redirect into stdout and stderr.
-func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
+// Note that containerID is only needed when following the log, or else
+// just pass in empty string "".
+func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open log file %q: %v", path, err)
@@ -166,8 +174,8 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer)
			}
		}
		// Wait until the next log change.
-		if err := waitLogs(watcher); err != nil {
-			return fmt.Errorf("failed to wait logs for log file %q: %v", path, err)
+		if found, err := m.waitLogs(containerID, watcher); !found {
+			return err
		}
		continue
	}
@@ -196,6 +204,41 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer)
	}
}
// waitLogs wait for the next log write. It returns a boolean and an error. The boolean
// indicates whether a new log is found; the error is error happens during waiting new logs.
func (m *kubeGenericRuntimeManager) waitLogs(id string, w *fsnotify.Watcher) (bool, error) {
errRetry := 5
for {
select {
case e := <-w.Events:
switch e.Op {
case fsnotify.Write:
return true, nil
default:
glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
}
case err := <-w.Errors:
glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
if errRetry == 0 {
return false, err
}
errRetry--
case <-time.After(stateCheckPeriod):
s, err := m.runtimeService.ContainerStatus(id)
if err != nil {
return false, err
}
// Only keep following container log when it is running.
if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
glog.Errorf("Container %q is not running (state=%q)", id, s.State)
// Do not return error because it's normal that the container stops
// during waiting.
return false, nil
}
}
}
}
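A sketch of the follow-with-liveness-check pattern used by waitLogs above, assuming the github.com/fsnotify/fsnotify package; waitForWrite and isAlive are hypothetical stand-ins for the container-state check:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
)

// waitForWrite blocks until the watched file receives a write, the watcher
// reports an error, or isAlive() says the producer has gone away. The timer
// case keeps the reader from waiting forever on a file that will never be
// written again.
func waitForWrite(w *fsnotify.Watcher, checkPeriod time.Duration, isAlive func() bool) (bool, error) {
	for {
		select {
		case e := <-w.Events:
			if e.Op&fsnotify.Write == fsnotify.Write {
				return true, nil
			}
		case err := <-w.Errors:
			return false, err
		case <-time.After(checkPeriod):
			if !isAlive() {
				return false, nil
			}
		}
	}
}

func main() {
	f, err := os.CreateTemp("", "log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	w, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	defer w.Close()
	if err := w.Add(f.Name()); err != nil {
		panic(err)
	}

	// Write something after a moment so the wait below returns.
	go func() {
		time.Sleep(time.Second)
		f.WriteString("hello\n")
	}()

	found, err := waitForWrite(w, 5*time.Second, func() bool { return true })
	fmt.Println(found, err)
}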
// parseFunc is a function parsing one log line to the internal log type.
// Notice that the caller must make sure logMessage is not nil.
type parseFunc func([]byte, *logMessage) error
@@ -267,28 +310,6 @@ func getParseFunc(log []byte) (parseFunc, error) {
	return nil, fmt.Errorf("unsupported log format: %q", log)
}
// waitLogs wait for the next log write.
func waitLogs(w *fsnotify.Watcher) error {
errRetry := 5
for {
select {
case e := <-w.Events:
switch e.Op {
case fsnotify.Write:
return nil
default:
glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
}
case err := <-w.Errors:
glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
if errRetry == 0 {
return err
}
errRetry--
}
}
}
// logWriter controls the writing into the stream based on the log options.
type logWriter struct {
	stdout io.Writer


@@ -41,24 +41,24 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
	}
	// set namespace options and supplemental groups.
-	podSc := pod.Spec.SecurityContext
-	if podSc == nil {
-		return synthesized
-	}
	synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{
		HostNetwork: pod.Spec.HostNetwork,
		HostIpc:     pod.Spec.HostIPC,
		HostPid:     pod.Spec.HostPID,
	}
-	if podSc.FSGroup != nil {
-		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
-	}
+	podSc := pod.Spec.SecurityContext
+	if podSc != nil {
+		if podSc.FSGroup != nil {
+			synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
+		}
+		if podSc.SupplementalGroups != nil {
+			synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...)
+		}
+	}
	if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, groups...)
	}
-	if podSc.SupplementalGroups != nil {
-		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...)
-	}
	return synthesized
}


@@ -209,7 +209,11 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error {
// StopContainer stops a running container with a grace period (i.e., timeout).
func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) error {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := getContextWithTimeout(time.Duration(timeout) * time.Second)
+	if timeout == 0 {
+		// Use default timeout if stop timeout is 0.
+		ctx, cancel = getContextWithTimeout(r.timeout)
+	}
	defer cancel()
	_, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{
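A sketch of the timeout selection shown above, assuming nothing beyond the standard library; contextForStop is a hypothetical helper, not the remote runtime client:

package main

import (
	"context"
	"fmt"
	"time"
)

// contextForStop picks the deadline for a stop request: the caller's grace
// period when one is given, otherwise a fixed default.
func contextForStop(gracePeriodSeconds int64, defaultTimeout time.Duration) (context.Context, context.CancelFunc) {
	timeout := time.Duration(gracePeriodSeconds) * time.Second
	if gracePeriodSeconds == 0 {
		timeout = defaultTimeout
	}
	return context.WithTimeout(context.Background(), timeout)
}

func main() {
	ctx, cancel := contextForStop(30, 2*time.Minute)
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("stop deadline:", deadline)
}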


@@ -11,20 +11,17 @@ load(
go_library(
	name = "go_default_library",
	srcs = [
-		"api.go",
		"doc.go",
		"healthcheck.go",
-		"http.go",
-		"listener.go",
-		"worker.go",
	],
	tags = ["automanaged"],
	deps = [
		"//vendor:github.com/golang/glog",
+		"//vendor:github.com/renstrom/dedent",
		"//vendor:k8s.io/apimachinery/pkg/types",
-		"//vendor:k8s.io/apimachinery/pkg/util/sets",
-		"//vendor:k8s.io/apimachinery/pkg/util/wait",
-		"//vendor:k8s.io/client-go/tools/cache",
+		"//vendor:k8s.io/client-go/pkg/api",
+		"//vendor:k8s.io/client-go/pkg/api/v1",
+		"//vendor:k8s.io/client-go/tools/record",
	],
)
@@ -34,6 +31,7 @@ go_test(
	library = ":go_default_library",
	tags = ["automanaged"],
	deps = [
+		"//vendor:github.com/davecgh/go-spew/spew",
		"//vendor:k8s.io/apimachinery/pkg/types",
		"//vendor:k8s.io/apimachinery/pkg/util/sets",
	],


@ -1,65 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthcheck
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
)
// All public API Methods for this package
// UpdateEndpoints Update the set of local endpoints for a service
func UpdateEndpoints(serviceName types.NamespacedName, endpointUids sets.String) {
req := &proxyMutationRequest{
serviceName: serviceName,
endpointUids: &endpointUids,
}
healthchecker.mutationRequestChannel <- req
}
func updateServiceListener(serviceName types.NamespacedName, listenPort int, add bool) bool {
responseChannel := make(chan bool)
req := &proxyListenerRequest{
serviceName: serviceName,
listenPort: uint16(listenPort),
add: add,
responseChannel: responseChannel,
}
healthchecker.listenerRequestChannel <- req
return <-responseChannel
}
// AddServiceListener Request addition of a listener for a service's health check
func AddServiceListener(serviceName types.NamespacedName, listenPort int) bool {
return updateServiceListener(serviceName, listenPort, true)
}
// DeleteServiceListener Request deletion of a listener for a service's health check
func DeleteServiceListener(serviceName types.NamespacedName, listenPort int) bool {
return updateServiceListener(serviceName, listenPort, false)
}
// Run Start the healthchecker main loop
func Run() {
healthchecker = proxyHealthCheckFactory()
// Wrap with a wait.Forever to handle panics.
go wait.Forever(func() {
healthchecker.handlerLoop()
}, 0)
}


@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies
+// Package healthcheck provides tools for serving kube-proxy healthchecks.
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"


@ -20,108 +20,216 @@ import (
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
"strings"
"sync"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/renstrom/dedent"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/pkg/api"
"k8s.io/client-go/tools/cache" clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
) )
// proxyMutationRequest: Message to request addition/deletion of endpoints for a service // Server serves HTTP endpoints for each service name, with results
type proxyMutationRequest struct { // based on the endpoints. If there are 0 endpoints for a service, it returns a
serviceName types.NamespacedName // 503 "Service Unavailable" error (telling LBs not to use this node). If there
endpointUids *sets.String // are 1 or more endpoints, it returns a 200 "OK".
type Server interface {
// Make the new set of services be active. Services that were open before
// will be closed. Services that are new will be opened. Service that
// existed and are in the new set will be left alone. The value of the map
// is the healthcheck-port to listen on.
SyncServices(newServices map[types.NamespacedName]uint16) error
// Make the new set of endpoints be active. Endpoints for services that do
// not exist will be dropped. The value of the map is the number of
// endpoints the service has on this node.
SyncEndpoints(newEndpoints map[types.NamespacedName]int) error
} }
// proxyListenerRequest: Message to request addition/deletion of a service responder on a listening port // Listener allows for testing of Server. If the Listener argument
type proxyListenerRequest struct { // to NewServer() is nil, the real net.Listen function will be used.
serviceName types.NamespacedName type Listener interface {
listenPort uint16 // Listen is very much like net.Listen, except the first arg (network) is
add bool // fixed to be "tcp".
responseChannel chan bool Listen(addr string) (net.Listener, error)
} }
// serviceEndpointsList: A list of endpoints for a service // HTTPServerFactory allows for testing of Server. If the
type serviceEndpointsList struct { // HTTPServerFactory argument to NewServer() is nil, the real
serviceName types.NamespacedName // http.Server type will be used.
endpoints *sets.String type HTTPServerFactory interface {
// New creates an instance of a type satisfying HTTPServer. This is
// designed to include http.Server.
New(addr string, handler http.Handler) HTTPServer
} }
// serviceResponder: Contains net/http datastructures necessary for responding to each Service's health check on its aux nodePort // HTTPServer allows for testing of Server.
type serviceResponder struct { type HTTPServer interface {
serviceName types.NamespacedName // Server is designed so that http.Server satifies this interface,
listenPort uint16 Serve(listener net.Listener) error
listener *net.Listener
server *http.Server
} }
// proxyHC: Handler structure for health check, endpoint add/delete and service listener add/delete requests // NewServer allocates a new healthcheck server manager. If either
type proxyHC struct { // of the injected arguments are nil, defaults will be used.
serviceEndpointsMap cache.ThreadSafeStore func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server {
serviceResponderMap map[types.NamespacedName]serviceResponder if listener == nil {
mutationRequestChannel chan *proxyMutationRequest listener = stdNetListener{}
listenerRequestChannel chan *proxyListenerRequest
}
// handleHealthCheckRequest - received a health check request - lookup and respond to HC.
func (h *proxyHC) handleHealthCheckRequest(rw http.ResponseWriter, serviceName string) {
s, ok := h.serviceEndpointsMap.Get(serviceName)
if !ok {
glog.V(4).Infof("Service %s not found or has no local endpoints", serviceName)
sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "No Service Endpoints Found")
return
} }
numEndpoints := len(*s.(*serviceEndpointsList).endpoints) if httpServerFactory == nil {
if numEndpoints > 0 { httpServerFactory = stdHTTPServerFactory{}
sendHealthCheckResponse(rw, http.StatusOK, fmt.Sprintf("%d Service Endpoints found", numEndpoints)) }
return return &server{
hostname: hostname,
recorder: recorder,
listener: listener,
httpFactory: httpServerFactory,
services: map[types.NamespacedName]*hcInstance{},
} }
sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "0 local Endpoints are alive")
} }
// handleMutationRequest - receive requests to mutate the table entry for a service // Implement Listener in terms of net.Listen.
func (h *proxyHC) handleMutationRequest(req *proxyMutationRequest) { type stdNetListener struct{}
numEndpoints := len(*req.endpointUids)
glog.V(4).Infof("LB service health check mutation request Service: %s - %d Endpoints %v", func (stdNetListener) Listen(addr string) (net.Listener, error) {
req.serviceName, numEndpoints, (*req.endpointUids).List()) return net.Listen("tcp", addr)
if numEndpoints == 0 { }
if _, ok := h.serviceEndpointsMap.Get(req.serviceName.String()); ok {
glog.V(4).Infof("Deleting endpoints map for service %s, all local endpoints gone", req.serviceName.String()) var _ Listener = stdNetListener{}
h.serviceEndpointsMap.Delete(req.serviceName.String())
} // Implement HTTPServerFactory in terms of http.Server.
return type stdHTTPServerFactory struct{}
func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
return &http.Server{
Addr: addr,
Handler: handler,
} }
var entry *serviceEndpointsList }
e, exists := h.serviceEndpointsMap.Get(req.serviceName.String())
if exists { var _ HTTPServerFactory = stdHTTPServerFactory{}
entry = e.(*serviceEndpointsList)
if entry.endpoints.Equal(*req.endpointUids) { type server struct {
return hostname string
} recorder record.EventRecorder // can be nil
// Compute differences just for printing logs about additions and removals listener Listener
deletedEndpoints := entry.endpoints.Difference(*req.endpointUids) httpFactory HTTPServerFactory
newEndpoints := req.endpointUids.Difference(*entry.endpoints)
for _, e := range newEndpoints.List() { lock sync.Mutex
glog.V(4).Infof("Adding local endpoint %s to LB health check for service %s", services map[types.NamespacedName]*hcInstance
e, req.serviceName.String()) }
}
for _, d := range deletedEndpoints.List() { func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) error {
glog.V(4).Infof("Deleted endpoint %s from service %s LB health check (%d endpoints left)", hcs.lock.Lock()
d, req.serviceName.String(), len(*entry.endpoints)) defer hcs.lock.Unlock()
// Remove any that are not needed any more.
for nsn, svc := range hcs.services {
if port, found := newServices[nsn]; !found || port != svc.port {
glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.listener.Close(); err != nil {
glog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
}
delete(hcs.services, nsn)
} }
} }
entry = &serviceEndpointsList{serviceName: req.serviceName, endpoints: req.endpointUids}
h.serviceEndpointsMap.Add(req.serviceName.String(), entry) // Add any that are needed.
for nsn, port := range newServices {
if hcs.services[nsn] != nil {
glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
continue
}
glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
svc := &hcInstance{port: port}
addr := fmt.Sprintf(":%d", port)
svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs})
var err error
svc.listener, err = hcs.listener.Listen(addr)
if err != nil {
msg := fmt.Sprintf("node %s failed to start healthcheck %q on port %d: %v", hcs.hostname, nsn.String(), port, err)
if hcs.recorder != nil {
hcs.recorder.Eventf(
&clientv1.ObjectReference{
Kind: "Service",
Namespace: nsn.Namespace,
Name: nsn.Name,
UID: types.UID(nsn.String()),
}, api.EventTypeWarning, "FailedToStartHealthcheck", msg)
}
glog.Error(msg)
continue
}
hcs.services[nsn] = svc
go func(nsn types.NamespacedName, svc *hcInstance) {
// Serve() will exit when the listener is closed.
glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.server.Serve(svc.listener); err != nil {
glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
return
}
glog.V(3).Infof("Healthcheck %q closed", nsn.String())
}(nsn, svc)
}
return nil
} }
// proxyHealthCheckRequest - Factory method to instantiate the health check handler type hcInstance struct {
func proxyHealthCheckFactory() *proxyHC { port uint16
glog.V(2).Infof("Initializing kube-proxy health checker") listener net.Listener
phc := &proxyHC{ server HTTPServer
serviceEndpointsMap: cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}), endpoints int // number of local endpoints for a service
serviceResponderMap: make(map[types.NamespacedName]serviceResponder), }
mutationRequestChannel: make(chan *proxyMutationRequest, 1024),
listenerRequestChannel: make(chan *proxyListenerRequest, 1024), type hcHandler struct {
} name types.NamespacedName
return phc hcs *server
}
var _ http.Handler = hcHandler{}
func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
h.hcs.lock.Lock()
count := h.hcs.services[h.name].endpoints
h.hcs.lock.Unlock()
resp.Header().Set("Content-Type", "application/json")
if count == 0 {
resp.WriteHeader(http.StatusServiceUnavailable)
} else {
resp.WriteHeader(http.StatusOK)
}
fmt.Fprintf(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
{
"service": {
"namespace": %q,
"name": %q
},
"localEndpoints": %d
}
`, h.name.Namespace, h.name.Name, count)), "\n"))
}
func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
hcs.lock.Lock()
defer hcs.lock.Unlock()
for nsn, count := range newEndpoints {
if hcs.services[nsn] == nil {
glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
continue
}
glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
hcs.services[nsn].endpoints = count
}
for nsn, hci := range hcs.services {
if _, found := newEndpoints[nsn]; !found {
hci.endpoints = 0
}
}
return nil
} }
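A minimal sketch of the healthcheck responder semantics described above (503 with zero local endpoints, 200 otherwise, JSON body), using only net/http; the service name and port are illustrative, not taken from the vendored package:

package main

import (
	"fmt"
	"net/http"
	"sync"
)

// endpointCount tracks local endpoints for one service; the handler answers
// the load-balancer probe: 503 while the count is zero, 200 once endpoints exist.
type endpointCount struct {
	mu    sync.Mutex
	count int
}

func (e *endpointCount) set(n int) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.count = n
}

func (e *endpointCount) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	e.mu.Lock()
	n := e.count
	e.mu.Unlock()

	w.Header().Set("Content-Type", "application/json")
	if n == 0 {
		w.WriteHeader(http.StatusServiceUnavailable)
	} else {
		w.WriteHeader(http.StatusOK)
	}
	fmt.Fprintf(w, `{"service":{"namespace":%q,"name":%q},"localEndpoints":%d}`, "default", "my-svc", n)
}

func main() {
	ec := &endpointCount{}
	ec.set(2) // pretend two local endpoints exist
	http.ListenAndServe(":9376", ec)
}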


@ -17,142 +17,341 @@ limitations under the License.
package healthcheck package healthcheck
import ( import (
"fmt" "encoding/json"
"io/ioutil" "net"
"math/rand"
"net/http" "net/http"
"net/http/httptest"
"testing" "testing"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
) )
type TestCaseData struct { type fakeListener struct {
nodePorts int openPorts sets.String
numEndpoints int
nodePortList []int
svcNames []types.NamespacedName
} }
const ( func newFakeListener() *fakeListener {
startPort = 20000 return &fakeListener{
endPort = 40000 openPorts: sets.String{},
) }
}
var (
choices = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func (fake *fakeListener) hasPort(addr string) bool {
) return fake.openPorts.Has(addr)
}
func generateRandomString(n int) string {
func (fake *fakeListener) Listen(addr string) (net.Listener, error) {
b := make([]byte, n) fake.openPorts.Insert(addr)
l := len(choices) return &fakeNetListener{
for i := range b { parent: fake,
b[i] = choices[rand.Intn(l)] addr: addr,
} }, nil
return string(b) }
}
type fakeNetListener struct {
func chooseServiceName(tc int, hint int) types.NamespacedName { parent *fakeListener
var svc types.NamespacedName addr string
svc.Namespace = fmt.Sprintf("ns_%d", tc) }
svc.Name = fmt.Sprintf("name_%d", hint)
return svc func (fake *fakeNetListener) Accept() (net.Conn, error) {
} // Not implemented
return nil, nil
func generateEndpointSet(max int) sets.String { }
s := sets.NewString()
for i := 0; i < max; i++ { func (fake *fakeNetListener) Close() error {
s.Insert(fmt.Sprintf("%d%s", i, generateRandomString(8))) fake.parent.openPorts.Delete(fake.addr)
} return nil
return s }
}
func (fake *fakeNetListener) Addr() net.Addr {
func verifyHealthChecks(tc *TestCaseData, t *testing.T) bool { // Not implemented
var success = true return nil
time.Sleep(100 * time.Millisecond) }
for i := 0; i < tc.nodePorts; i++ {
t.Logf("Validating HealthCheck works for svc %s nodePort %d\n", tc.svcNames[i], tc.nodePortList[i]) type fakeHTTPServerFactory struct{}
res, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", tc.nodePortList[i]))
if err != nil { func newFakeHTTPServerFactory() *fakeHTTPServerFactory {
t.Logf("ERROR: Failed to connect to listening port") return &fakeHTTPServerFactory{}
success = false }
continue
} func (fake *fakeHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
robots, err := ioutil.ReadAll(res.Body) return &fakeHTTPServer{
if res.StatusCode == http.StatusServiceUnavailable { addr: addr,
t.Logf("ERROR: HealthCheck returned %s: %s", res.Status, string(robots)) handler: handler,
success = false }
continue }
}
res.Body.Close() type fakeHTTPServer struct {
if err != nil { addr string
t.Logf("Error: reading body of response (%s)", err) handler http.Handler
success = false }
continue
} func (fake *fakeHTTPServer) Serve(listener net.Listener) error {
} return nil // Cause the goroutine to return
if success { }
t.Logf("Success: All nodePorts found active")
} func mknsn(ns, name string) types.NamespacedName {
return success return types.NamespacedName{
} Namespace: ns,
Name: name,
func TestHealthChecker(t *testing.T) { }
testcases := []TestCaseData{ }
{
nodePorts: 1, type hcPayload struct {
numEndpoints: 2, Service struct {
}, Namespace string
{ Name string
nodePorts: 10, }
numEndpoints: 6, LocalEndpoints int
}, }
{
nodePorts: 100, func TestServer(t *testing.T) {
numEndpoints: 1, listener := newFakeListener()
}, httpFactory := newFakeHTTPServerFactory()
}
hcsi := NewServer("hostname", nil, listener, httpFactory)
Run() hcs := hcsi.(*server)
if len(hcs.services) != 0 {
ports := startPort t.Errorf("expected 0 services, got %d", len(hcs.services))
for n, tc := range testcases { }
tc.nodePortList = make([]int, tc.nodePorts)
tc.svcNames = make([]types.NamespacedName, tc.nodePorts) // sync nothing
for i := 0; i < tc.nodePorts; i++ { hcs.SyncServices(nil)
tc.svcNames[i] = chooseServiceName(n, i) if len(hcs.services) != 0 {
t.Logf("Updating endpoints map for %s %d", tc.svcNames[i], tc.numEndpoints) t.Errorf("expected 0 services, got %d", len(hcs.services))
for { }
UpdateEndpoints(tc.svcNames[i], generateEndpointSet(tc.numEndpoints)) hcs.SyncEndpoints(nil)
tc.nodePortList[i] = ports if len(hcs.services) != 0 {
ports++ t.Errorf("expected 0 services, got %d", len(hcs.services))
if AddServiceListener(tc.svcNames[i], tc.nodePortList[i]) { }
break
} // sync unknown endpoints, should be dropped
DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i]) hcs.SyncEndpoints(map[types.NamespacedName]int{mknsn("a", "b"): 93})
// Keep searching for a port that works if len(hcs.services) != 0 {
t.Logf("Failed to bind/listen on port %d...trying next port", ports-1) t.Errorf("expected 0 services, got %d", len(hcs.services))
if ports > endPort { }
t.Errorf("Exhausted range of ports available for tests")
return // sync a real service
} nsn := mknsn("a", "b")
} hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 9376})
} if len(hcs.services) != 1 {
t.Logf("Validating if all nodePorts for tc %d work", n) t.Errorf("expected 1 service, got %d", len(hcs.services))
if !verifyHealthChecks(&tc, t) { }
t.Errorf("Healthcheck validation failed") if hcs.services[nsn].endpoints != 0 {
} t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
for i := 0; i < tc.nodePorts; i++ { if len(listener.openPorts) != 1 {
DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i]) t.Errorf("expected 1 open port, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
UpdateEndpoints(tc.svcNames[i], sets.NewString()) }
} if !listener.hasPort(":9376") {
t.Errorf("expected port :9376 to be open\n%s", spew.Sdump(listener.openPorts))
// Ensure that all listeners have been shutdown }
if verifyHealthChecks(&tc, t) { // test the handler
t.Errorf("Healthcheck validation failed") testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
}
// sync an endpoint
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 18 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusOK, 18, t)
// sync zero endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 0})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
// put the endpoint back
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 11})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 11 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// sync nil endpoints
hcs.SyncEndpoints(nil)
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
// put the endpoint back
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 18 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// delete the service
hcs.SyncServices(nil)
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
// sync multiple services
nsn1 := mknsn("a", "b")
nsn2 := mknsn("c", "d")
nsn3 := mknsn("e", "f")
nsn4 := mknsn("g", "h")
hcs.SyncServices(map[types.NamespacedName]uint16{
nsn1: 9376,
nsn2: 12909,
nsn3: 11113,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 service, got %d", len(hcs.services))
}
if hcs.services[nsn1].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn1].endpoints)
}
if hcs.services[nsn2].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if len(listener.openPorts) != 3 {
t.Errorf("expected 3 open ports, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
}
// test the handlers
testHandler(hcs, nsn1, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
// sync endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn1: 9,
nsn2: 3,
nsn3: 7,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn1].endpoints != 9 {
t.Errorf("expected 9 endpoints, got %d", hcs.services[nsn1].endpoints)
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
// test the handlers
testHandler(hcs, nsn1, http.StatusOK, 9, t)
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
// sync new services
hcs.SyncServices(map[types.NamespacedName]uint16{
//nsn1: 9376, // remove it
nsn2: 12909, // leave it
nsn3: 11114, // change it
nsn4: 11878, // add it
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 service, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn4, http.StatusServiceUnavailable, 0, t)
// sync endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn1: 9,
nsn2: 3,
nsn3: 7,
nsn4: 6,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 6 {
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
testHandler(hcs, nsn4, http.StatusOK, 6, t)
// sync endpoints, missing nsn2
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn3: 7,
nsn4: 6,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 6 {
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
testHandler(hcs, nsn4, http.StatusOK, 6, t)
}
func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints int, t *testing.T) {
handler := hcs.services[nsn].server.(*fakeHTTPServer).handler
req, err := http.NewRequest("GET", "/healthz", nil)
if err != nil {
t.Fatal(err)
}
resp := httptest.NewRecorder()
handler.ServeHTTP(resp, req)
if resp.Code != status {
t.Errorf("expected status code %v, got %v", status, resp.Code)
}
var payload hcPayload
if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
t.Fatal(err)
}
if payload.Service.Name != nsn.Name || payload.Service.Namespace != nsn.Namespace {
t.Errorf("expected payload name %q, got %v", nsn.String(), payload.Service)
}
if payload.LocalEndpoints != endpoints {
t.Errorf("expected %d endpoints, got %d", endpoints, payload.LocalEndpoints)
} }
} }
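A sketch of the httptest technique the test above relies on: exercising an http.Handler in-process without opening a real port; probe is a hypothetical helper:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// probe drives an http.Handler directly with httptest, the same way the test
// above exercises the healthcheck handler.
func probe(h http.Handler, path string) (int, string) {
	req := httptest.NewRequest("GET", path, nil)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	return rec.Code, rec.Body.String()
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, `{"localEndpoints":3}`)
	})
	code, body := probe(h, "/healthz")
	fmt.Println(code, body)
}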


@ -1,46 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthcheck
import (
"fmt"
"net/http"
"github.com/golang/glog"
)
// A healthCheckHandler serves http requests on /healthz on the service health check node port,
// and responds to every request with either:
// 200 OK and the count of endpoints for the given service that are local to this node.
// or
// 503 Service Unavailable If the count is zero or the service does not exist
type healthCheckHandler struct {
svcNsName string
}
// HTTP Utility function to send the required statusCode and error text to a http.ResponseWriter object
func sendHealthCheckResponse(rw http.ResponseWriter, statusCode int, error string) {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(statusCode)
fmt.Fprint(rw, error)
}
// ServeHTTP: Interface callback method for net.Listener Handlers
func (h healthCheckHandler) ServeHTTP(response http.ResponseWriter, req *http.Request) {
glog.V(4).Infof("Received HC Request Service %s from Cloud Load Balancer", h.svcNsName)
healthchecker.handleHealthCheckRequest(response, h.svcNsName)
}


@ -1,77 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthcheck
// Create/Delete dynamic listeners on the required nodePorts
import (
"fmt"
"net"
"net/http"
"github.com/golang/glog"
)
// handleServiceListenerRequest: receive requests to add/remove service health check listening ports
func (h *proxyHC) handleServiceListenerRequest(req *proxyListenerRequest) bool {
sr, serviceFound := h.serviceResponderMap[req.serviceName]
if !req.add {
if !serviceFound {
return false
}
glog.Infof("Deleting HealthCheckListenPort for service %s port %d",
req.serviceName, req.listenPort)
delete(h.serviceResponderMap, req.serviceName)
(*sr.listener).Close()
return true
} else if serviceFound {
if req.listenPort == sr.listenPort {
// Addition requested but responder for service already exists and port is unchanged
return true
}
// Addition requested but responder for service already exists but the listen port has changed
glog.Infof("HealthCheckListenPort for service %s changed from %d to %d - closing old listening port",
req.serviceName, sr.listenPort, req.listenPort)
delete(h.serviceResponderMap, req.serviceName)
(*sr.listener).Close()
}
// Create a service responder object and start listening and serving on the provided port
glog.V(2).Infof("Adding health check listener for service %s on nodePort %d", req.serviceName, req.listenPort)
server := http.Server{
Addr: fmt.Sprintf(":%d", req.listenPort),
Handler: healthCheckHandler{svcNsName: req.serviceName.String()},
}
listener, err := net.Listen("tcp", server.Addr)
if err != nil {
glog.Warningf("FAILED to listen on address %s (%s)\n", server.Addr, err)
return false
}
h.serviceResponderMap[req.serviceName] = serviceResponder{serviceName: req.serviceName,
listenPort: req.listenPort,
listener: &listener,
server: &server}
go func() {
// Anonymous goroutine to block on Serve for this listen port - Serve will exit when the listener is closed
glog.V(3).Infof("Goroutine blocking on serving health checks for %s on port %d", req.serviceName, req.listenPort)
if err := server.Serve(listener); err != nil {
glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed with error %s\n", req.listenPort, req.serviceName, err)
return
}
glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed\n", req.listenPort, req.serviceName)
}()
return true
}


@ -1,53 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"
import (
"time"
"github.com/golang/glog"
)
var healthchecker *proxyHC
// handlerLoop Serializes all requests to prevent concurrent access to the maps
func (h *proxyHC) handlerLoop() {
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for {
select {
case req := <-h.mutationRequestChannel:
h.handleMutationRequest(req)
case req := <-h.listenerRequestChannel:
req.responseChannel <- h.handleServiceListenerRequest(req)
case <-ticker.C:
go h.sync()
}
}
}
func (h *proxyHC) sync() {
glog.V(4).Infof("%d Health Check Listeners", len(h.serviceResponderMap))
glog.V(4).Infof("%d Services registered for health checking", len(h.serviceEndpointsMap.List()))
for _, svc := range h.serviceEndpointsMap.ListKeys() {
if e, ok := h.serviceEndpointsMap.Get(svc); ok {
endpointList := e.(*serviceEndpointsList)
glog.V(4).Infof("Service %s has %d local endpoints", svc, endpointList.endpoints.Len())
}
}
}


@@ -50,7 +50,6 @@ go_test(
	"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
	"//vendor:k8s.io/apimachinery/pkg/types",
	"//vendor:k8s.io/apimachinery/pkg/util/intstr",
-	"//vendor:k8s.io/apimachinery/pkg/util/sets",
	],
)


@@ -213,7 +213,7 @@ type Proxier struct {
	nodeIP        net.IP
	portMapper    portOpener
	recorder      record.EventRecorder
-	healthChecker healthChecker
+	healthChecker healthcheck.Server
}
type localPort struct {
@@ -245,17 +245,6 @@ func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return openLocalPort(lp)
}
-type healthChecker interface {
-	UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String)
-}
-// TODO: the healthcheck pkg should offer a type
-type globalHealthChecker struct{}
-func (globalHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) {
-	healthcheck.UpdateEndpoints(serviceName, endpointUIDs)
-}
// Proxier implements ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
@@ -309,8 +298,7 @@ func NewProxier(ipt utiliptables.Interface,
		glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
	}
-	healthChecker := globalHealthChecker{}
-	go healthcheck.Run()
+	healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
	var throttle flowcontrol.RateLimiter
	// Defaulting back to not limit sync rate when minSyncPeriod is 0.
@ -444,18 +432,12 @@ func (proxier *Proxier) SyncLoop() {
} }
} }
type healthCheckPort struct {
namespace types.NamespacedName
nodeport int
}
// Accepts a list of Services and the existing service map. Returns the new // Accepts a list of Services and the existing service map. Returns the new
// service map, a list of healthcheck ports to add to or remove from the health // service map, a map of healthcheck ports, and a set of stale UDP
// checking listener service, and a set of stale UDP services. // services.
func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, []healthCheckPort, []healthCheckPort, sets.String) { func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, map[types.NamespacedName]uint16, sets.String) {
newServiceMap := make(proxyServiceMap) newServiceMap := make(proxyServiceMap)
healthCheckAdd := make([]healthCheckPort, 0) hcPorts := make(map[types.NamespacedName]uint16)
healthCheckDel := make([]healthCheckPort, 0)
for i := range allServices { for i := range allServices {
service := &allServices[i] service := &allServices[i]
@ -492,12 +474,8 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
glog.V(1).Infof("Updating existing service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol) glog.V(1).Infof("Updating existing service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol)
} }
if !exists || !equal { if info.onlyNodeLocalEndpoints {
if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 { hcPorts[svcName] = uint16(info.healthCheckNodePort)
healthCheckAdd = append(healthCheckAdd, healthCheckPort{serviceName.NamespacedName, info.healthCheckNodePort})
} else {
healthCheckDel = append(healthCheckDel, healthCheckPort{serviceName.NamespacedName, 0})
}
} }
newServiceMap[serviceName] = info newServiceMap[serviceName] = info
@ -505,6 +483,13 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
} }
} }
for nsn, port := range hcPorts {
if port == 0 {
glog.Errorf("Service %q has no healthcheck nodeport", nsn)
delete(hcPorts, nsn)
}
}
staleUDPServices := sets.NewString() staleUDPServices := sets.NewString()
// Remove serviceports missing from the update. // Remove serviceports missing from the update.
for name, info := range oldServiceMap { for name, info := range oldServiceMap {
@ -513,13 +498,10 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
if info.protocol == api.ProtocolUDP { if info.protocol == api.ProtocolUDP {
staleUDPServices.Insert(info.clusterIP.String()) staleUDPServices.Insert(info.clusterIP.String())
} }
if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 {
healthCheckDel = append(healthCheckDel, healthCheckPort{name.NamespacedName, info.healthCheckNodePort})
}
} }
} }
return newServiceMap, healthCheckAdd, healthCheckDel, staleUDPServices return newServiceMap, hcPorts, staleUDPServices
} }
// OnServiceUpdate tracks the active set of service proxies. // OnServiceUpdate tracks the active set of service proxies.
@ -533,19 +515,11 @@ func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) {
defer proxier.mu.Unlock() defer proxier.mu.Unlock()
proxier.haveReceivedServiceUpdate = true proxier.haveReceivedServiceUpdate = true
newServiceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap) newServiceMap, hcPorts, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap)
for _, hc := range hcAdd {
glog.V(4).Infof("Adding health check for %+v, port %v", hc.namespace, hc.nodeport) // update healthcheck ports
// Turn on healthcheck responder to listen on the health check nodePort if err := proxier.healthChecker.SyncServices(hcPorts); err != nil {
// FIXME: handle failures from adding the service glog.Errorf("Error syncing healtcheck ports: %v", err)
healthcheck.AddServiceListener(hc.namespace, hc.nodeport)
}
for _, hc := range hcDel {
// Remove ServiceListener health check nodePorts from the health checker
// TODO - Stats
glog.V(4).Infof("Deleting health check for %+v, port %v", hc.namespace, hc.nodeport)
// FIXME: handle failures from deleting the service
healthcheck.DeleteServiceListener(hc.namespace, hc.nodeport)
} }
if len(newServiceMap) != len(proxier.serviceMap) || !reflect.DeepEqual(newServiceMap, proxier.serviceMap) { if len(newServiceMap) != len(proxier.serviceMap) || !reflect.DeepEqual(newServiceMap, proxier.serviceMap) {
@ -568,7 +542,13 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
proxier.allEndpoints = allEndpoints proxier.allEndpoints = allEndpoints
// TODO: once service has made this same transform, move this into proxier.syncProxyRules() // TODO: once service has made this same transform, move this into proxier.syncProxyRules()
newMap, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname, proxier.healthChecker) newMap, hcEndpoints, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname)
// update healthcheck endpoints
if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
}
if len(newMap) != len(proxier.endpointsMap) || !reflect.DeepEqual(newMap, proxier.endpointsMap) { if len(newMap) != len(proxier.endpointsMap) || !reflect.DeepEqual(newMap, proxier.endpointsMap) {
proxier.endpointsMap = newMap proxier.endpointsMap = newMap
proxier.syncProxyRules() proxier.syncProxyRules()
@ -580,11 +560,11 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
} }
// Convert a slice of api.Endpoints objects into a map of service-port -> endpoints. // Convert a slice of api.Endpoints objects into a map of service-port -> endpoints.
func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string, func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string) (newMap proxyEndpointMap, hcEndpoints map[types.NamespacedName]int, staleSet map[endpointServicePair]bool) {
healthChecker healthChecker) (newMap proxyEndpointMap, staleSet map[endpointServicePair]bool) {
// return values // return values
newMap = make(proxyEndpointMap) newMap = make(proxyEndpointMap)
hcEndpoints = make(map[types.NamespacedName]int)
staleSet = make(map[endpointServicePair]bool) staleSet = make(map[endpointServicePair]bool)
// Update endpoints for services. // Update endpoints for services.
@ -610,19 +590,30 @@ func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, host
} }
} }
// Update service health check if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
allSvcPorts := make(map[proxy.ServicePortName]bool) return
for svcPort := range curMap {
allSvcPorts[svcPort] = true
}
for svcPort := range newMap {
allSvcPorts[svcPort] = true
}
for svcPort := range allSvcPorts {
updateHealthCheckEntries(svcPort.NamespacedName, newMap[svcPort], healthChecker)
} }
return newMap, staleSet // accumulate local IPs per service, ignoring ports
localIPs := map[types.NamespacedName]sets.String{}
for svcPort := range newMap {
for _, ep := range newMap[svcPort] {
if ep.isLocal {
nsn := svcPort.NamespacedName
if localIPs[nsn] == nil {
localIPs[nsn] = sets.NewString()
}
ip := strings.Split(ep.endpoint, ":")[0] // just the IP part
localIPs[nsn].Insert(ip)
}
}
}
// produce a count per service
for nsn, ips := range localIPs {
hcEndpoints[nsn] = len(ips)
}
return newMap, hcEndpoints, staleSet
} }
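A simplified sketch of the per-service local-endpoint count computed above, with plain strings standing in for NamespacedName and endpointsInfo; localEndpointCounts and the sample data are illustrative:

package main

import (
	"fmt"
	"strings"
)

// localEndpointCounts reduces endpoint addresses ("ip:port") to the number of
// distinct local IPs per service, the quantity reported to the healthcheck server.
func localEndpointCounts(endpoints map[string][]string, isLocal func(ip string) bool) map[string]int {
	counts := map[string]int{}
	for svc, eps := range endpoints {
		ips := map[string]struct{}{}
		for _, ep := range eps {
			ip := strings.Split(ep, ":")[0] // just the IP part
			if isLocal(ip) {
				ips[ip] = struct{}{}
			}
		}
		if len(ips) > 0 {
			counts[svc] = len(ips)
		}
	}
	return counts
}

func main() {
	eps := map[string][]string{
		"ns/svc": {"10.0.0.1:8080", "10.0.0.1:9090", "10.0.0.2:8080"},
	}
	local := func(ip string) bool { return ip == "10.0.0.1" }
	fmt.Println(localEndpointCounts(eps, local)) // map[ns/svc:1]
}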
// Gather information about all the endpoint state for a given api.Endpoints.
@@ -668,23 +659,6 @@ func accumulateEndpointsMap(endpoints *api.Endpoints, hostname string,
	}
}
-// updateHealthCheckEntries - send the new set of local endpoints to the health checker
-func updateHealthCheckEntries(name types.NamespacedName, endpoints []*endpointsInfo, healthChecker healthChecker) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
-		return
-	}
-	// Use a set instead of a slice to provide deduplication
-	epSet := sets.NewString()
-	for _, portInfo := range endpoints {
-		if portInfo.isLocal {
-			// kube-proxy health check only needs local endpoints
-			epSet.Insert(fmt.Sprintf("%s/%s", name.Namespace, name.Name))
-		}
-	}
-	healthChecker.UpdateEndpoints(name, epSet)
-}
// portProtoHash takes the ServicePortName and protocol for a service
// returns the associated 16 character hash. This is computed by hashing (sha256)
// then encoding to base32 and truncating to 16 chars. We do this because IPTables


@@ -17,6 +17,7 @@ limitations under the License.
package iptables
import (
+	"reflect"
	"strconv"
	"testing"
@@ -29,7 +30,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/service"
	"k8s.io/kubernetes/pkg/proxy"
@@ -355,9 +355,27 @@ func (f *fakePortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return nil, nil
}
-type fakeHealthChecker struct{}
-func (fakeHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) {}
+type fakeHealthChecker struct {
+	services  map[types.NamespacedName]uint16
+	endpoints map[types.NamespacedName]int
+}
+func newFakeHealthChecker() *fakeHealthChecker {
+	return &fakeHealthChecker{
+		services:  map[types.NamespacedName]uint16{},
+		endpoints: map[types.NamespacedName]int{},
+	}
+}
+func (fake *fakeHealthChecker) SyncServices(newServices map[types.NamespacedName]uint16) error {
+	fake.services = newServices
+	return nil
+}
+func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
+	fake.endpoints = newEndpoints
+	return nil
+}
const testHostname = "test-hostname"
@@ -374,7 +392,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
		hostname:   testHostname,
		portsMap:   make(map[localPort]closeable),
		portMapper: &fakePortOpener{[]*localPort{}},
-		healthChecker: fakeHealthChecker{},
+		healthChecker: newFakeHealthChecker(),
	}
}
@ -926,30 +944,18 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
}), }),
} }
serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap)) serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
if len(serviceMap) != 8 { if len(serviceMap) != 8 {
t.Errorf("expected service map length 8, got %v", serviceMap) t.Errorf("expected service map length 8, got %v", serviceMap)
} }
// The only-local-loadbalancer ones get added // The only-local-loadbalancer ones get added
if len(hcAdd) != 2 { if len(hcPorts) != 1 {
t.Errorf("expected healthcheck add length 2, got %v", hcAdd) t.Errorf("expected 1 healthcheck port, got %v", hcPorts)
} else { } else {
for _, hc := range hcAdd { nsn := makeNSN("somewhere", "only-local-load-balancer")
if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" { if port, found := hcPorts[nsn]; !found || port != 345 {
t.Errorf("unexpected healthcheck listener added: %v", hc) t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, hcPorts)
}
}
}
// All the rest get deleted
if len(hcDel) != 6 {
t.Errorf("expected healthcheck del length 6, got %v", hcDel)
} else {
for _, hc := range hcDel {
if hc.namespace.Namespace == "somewhere" && hc.namespace.Name == "only-local-load-balancer" {
t.Errorf("unexpected healthcheck listener deleted: %v", hc)
}
} }
} }
@ -961,27 +967,13 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
// Remove some stuff // Remove some stuff
services = []api.Service{services[0]} services = []api.Service{services[0]}
services[0].Spec.Ports = []api.ServicePort{services[0].Spec.Ports[1]} services[0].Spec.Ports = []api.ServicePort{services[0].Spec.Ports[1]}
serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(services, serviceMap) serviceMap, hcPorts, staleUDPServices = buildServiceMap(services, serviceMap)
if len(serviceMap) != 1 { if len(serviceMap) != 1 {
t.Errorf("expected service map length 1, got %v", serviceMap) t.Errorf("expected service map length 1, got %v", serviceMap)
} }
if len(hcAdd) != 0 { if len(hcPorts) != 0 {
t.Errorf("expected healthcheck add length 1, got %v", hcAdd) t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
}
// The only OnlyLocal annotation was removed above, so we expect a delete now.
// FIXME: Since the BetaAnnotationHealthCheckNodePort is the same for all
// ServicePorts, we'll get one delete per ServicePort, even though they all
// contain the same information
if len(hcDel) != 2 {
t.Errorf("expected healthcheck del length 2, got %v", hcDel)
} else {
for _, hc := range hcDel {
if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" {
t.Errorf("unexpected healthcheck listener deleted: %v", hc)
}
}
} }
// All services but one were deleted. While you'd expect only the ClusterIPs // All services but one were deleted. While you'd expect only the ClusterIPs
@ -1008,17 +1000,14 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
} }
// Headless service should be ignored // Headless service should be ignored
serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap)) serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
if len(serviceMap) != 0 { if len(serviceMap) != 0 {
t.Errorf("expected service map length 0, got %d", len(serviceMap)) t.Errorf("expected service map length 0, got %d", len(serviceMap))
} }
// No proxied services, so no healthchecks // No proxied services, so no healthchecks
if len(hcAdd) != 0 { if len(hcPorts) != 0 {
t.Errorf("expected healthcheck add length 0, got %d", len(hcAdd)) t.Errorf("expected healthcheck ports length 0, got %d", len(hcPorts))
}
if len(hcDel) != 0 {
t.Errorf("expected healthcheck del length 0, got %d", len(hcDel))
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
@ -1036,16 +1025,13 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
}), }),
} }
serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap)) serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
if len(serviceMap) != 0 { if len(serviceMap) != 0 {
t.Errorf("expected service map length 0, got %v", serviceMap) t.Errorf("expected service map length 0, got %v", serviceMap)
} }
// No proxied services, so no healthchecks // No proxied services, so no healthchecks
if len(hcAdd) != 0 { if len(hcPorts) != 0 {
t.Errorf("expected healthcheck add length 0, got %v", hcAdd) t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
}
if len(hcDel) != 0 {
t.Errorf("expected healthcheck del length 0, got %v", hcDel)
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices) t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices)
@ -1081,15 +1067,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
}), }),
} }
serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(first, make(proxyServiceMap)) serviceMap, hcPorts, staleUDPServices := buildServiceMap(first, make(proxyServiceMap))
if len(serviceMap) != 2 { if len(serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", serviceMap) t.Errorf("expected service map length 2, got %v", serviceMap)
} }
if len(hcAdd) != 0 { if len(hcPorts) != 0 {
t.Errorf("expected healthcheck add length 0, got %v", hcAdd) t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
}
if len(hcDel) != 2 {
t.Errorf("expected healthcheck del length 2, got %v", hcDel)
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
// Services only added, so nothing stale yet // Services only added, so nothing stale yet
@ -1097,15 +1080,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
} }
// Change service to load-balancer // Change service to load-balancer
serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap) serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap)
if len(serviceMap) != 2 { if len(serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", serviceMap) t.Errorf("expected service map length 2, got %v", serviceMap)
} }
if len(hcAdd) != 2 { if len(hcPorts) != 1 {
t.Errorf("expected healthcheck add length 2, got %v", hcAdd) t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
}
if len(hcDel) != 0 {
t.Errorf("expected healthcheck add length 2, got %v", hcDel)
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List())
@ -1113,30 +1093,24 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
// No change; make sure the service map stays the same and there are // No change; make sure the service map stays the same and there are
// no health-check changes // no health-check changes
serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap) serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap)
if len(serviceMap) != 2 { if len(serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", serviceMap) t.Errorf("expected service map length 2, got %v", serviceMap)
} }
if len(hcAdd) != 0 { if len(hcPorts) != 1 {
t.Errorf("expected healthcheck add length 0, got %v", hcAdd) t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
}
if len(hcDel) != 0 {
t.Errorf("expected healthcheck add length 2, got %v", hcDel)
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List()) t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List())
} }
// And back to ClusterIP // And back to ClusterIP
serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(first, serviceMap) serviceMap, hcPorts, staleUDPServices = buildServiceMap(first, serviceMap)
if len(serviceMap) != 2 { if len(serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", serviceMap) t.Errorf("expected service map length 2, got %v", serviceMap)
} }
if len(hcAdd) != 0 { if len(hcPorts) != 0 {
t.Errorf("expected healthcheck add length 0, got %v", hcAdd) t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
}
if len(hcDel) != 2 {
t.Errorf("expected healthcheck del length 2, got %v", hcDel)
} }
if len(staleUDPServices) != 0 { if len(staleUDPServices) != 0 {
// Services only added, so nothing stale yet // Services only added, so nothing stale yet
@ -1386,28 +1360,33 @@ func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) api
return ept return ept
} }
func makeNSN(namespace, name string) types.NamespacedName {
return types.NamespacedName{Namespace: namespace, Name: name}
}
func makeServicePortName(ns, name, port string) proxy.ServicePortName { func makeServicePortName(ns, name, port string) proxy.ServicePortName {
return proxy.ServicePortName{ return proxy.ServicePortName{
NamespacedName: types.NamespacedName{ NamespacedName: makeNSN(ns, name),
Namespace: ns, Port: port,
Name: name,
},
Port: port,
} }
} }
func Test_updateEndpoints(t *testing.T) { func Test_updateEndpoints(t *testing.T) {
var nodeName = "host"
testCases := []struct { testCases := []struct {
newEndpoints []api.Endpoints newEndpoints []api.Endpoints
oldEndpoints map[proxy.ServicePortName][]*endpointsInfo oldEndpoints map[proxy.ServicePortName][]*endpointsInfo
expectedResult map[proxy.ServicePortName][]*endpointsInfo expectedResult map[proxy.ServicePortName][]*endpointsInfo
expectedStale []endpointServicePair expectedStale []endpointServicePair
expectedHealthchecks map[types.NamespacedName]int
}{{ }{{
// Case[0]: nothing // Case[0]: nothing
newEndpoints: []api.Endpoints{}, newEndpoints: []api.Endpoints{},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[1]: no change, unnamed port // Case[1]: no change, unnamed port
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1432,14 +1411,16 @@ func Test_updateEndpoints(t *testing.T) {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[2]: no change, named port // Case[2]: no change, named port, local
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{ ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.1", IP: "1.1.1.1",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p11", Name: "p11",
@ -1450,15 +1431,18 @@ func Test_updateEndpoints(t *testing.T) {
}, },
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
}, },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, { }, {
// Case[3]: no change, multiple subsets // Case[3]: no change, multiple subsets
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1498,14 +1482,16 @@ func Test_updateEndpoints(t *testing.T) {
{"1.1.1.2:12", false}, {"1.1.1.2:12", false},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[4]: no change, multiple subsets, multiple ports // Case[4]: no change, multiple subsets, multiple ports, local
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{ ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.1", IP: "1.1.1.1",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p11", Name: "p11",
@ -1527,10 +1513,10 @@ func Test_updateEndpoints(t *testing.T) {
}, },
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
makeServicePortName("ns1", "ep1", "p12"): { makeServicePortName("ns1", "ep1", "p12"): {
{"1.1.1.1:12", false}, {"1.1.1.1:12", true},
}, },
makeServicePortName("ns1", "ep1", "p13"): { makeServicePortName("ns1", "ep1", "p13"): {
{"1.1.1.3:13", false}, {"1.1.1.3:13", false},
@ -1538,16 +1524,19 @@ func Test_updateEndpoints(t *testing.T) {
}, },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
makeServicePortName("ns1", "ep1", "p12"): { makeServicePortName("ns1", "ep1", "p12"): {
{"1.1.1.1:12", false}, {"1.1.1.1:12", true},
}, },
makeServicePortName("ns1", "ep1", "p13"): { makeServicePortName("ns1", "ep1", "p13"): {
{"1.1.1.3:13", false}, {"1.1.1.3:13", false},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, { }, {
// Case[5]: no change, multiple endpoints, subsets, IPs, and ports // Case[5]: no change, multiple endpoints, subsets, IPs, and ports
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1556,7 +1545,8 @@ func Test_updateEndpoints(t *testing.T) {
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.1", IP: "1.1.1.1",
}, { }, {
IP: "1.1.1.2", IP: "1.1.1.2",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p11", Name: "p11",
@ -1569,7 +1559,8 @@ func Test_updateEndpoints(t *testing.T) {
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.3", IP: "1.1.1.3",
}, { }, {
IP: "1.1.1.4", IP: "1.1.1.4",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p13", Name: "p13",
@ -1585,7 +1576,8 @@ func Test_updateEndpoints(t *testing.T) {
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "2.2.2.1", IP: "2.2.2.1",
}, { }, {
IP: "2.2.2.2", IP: "2.2.2.2",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p21", Name: "p21",
@ -1600,63 +1592,68 @@ func Test_updateEndpoints(t *testing.T) {
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
{"1.1.1.2:11", false}, {"1.1.1.2:11", true},
}, },
makeServicePortName("ns1", "ep1", "p12"): { makeServicePortName("ns1", "ep1", "p12"): {
{"1.1.1.1:12", false}, {"1.1.1.1:12", false},
{"1.1.1.2:12", false}, {"1.1.1.2:12", true},
}, },
makeServicePortName("ns1", "ep1", "p13"): { makeServicePortName("ns1", "ep1", "p13"): {
{"1.1.1.3:13", false}, {"1.1.1.3:13", false},
{"1.1.1.4:13", false}, {"1.1.1.4:13", true},
}, },
makeServicePortName("ns1", "ep1", "p14"): { makeServicePortName("ns1", "ep1", "p14"): {
{"1.1.1.3:14", false}, {"1.1.1.3:14", false},
{"1.1.1.4:14", false}, {"1.1.1.4:14", true},
}, },
makeServicePortName("ns2", "ep2", "p21"): { makeServicePortName("ns2", "ep2", "p21"): {
{"2.2.2.1:21", false}, {"2.2.2.1:21", false},
{"2.2.2.2:21", false}, {"2.2.2.2:21", true},
}, },
makeServicePortName("ns2", "ep2", "p22"): { makeServicePortName("ns2", "ep2", "p22"): {
{"2.2.2.1:22", false}, {"2.2.2.1:22", false},
{"2.2.2.2:22", false}, {"2.2.2.2:22", true},
}, },
}, },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
{"1.1.1.2:11", false}, {"1.1.1.2:11", true},
}, },
makeServicePortName("ns1", "ep1", "p12"): { makeServicePortName("ns1", "ep1", "p12"): {
{"1.1.1.1:12", false}, {"1.1.1.1:12", false},
{"1.1.1.2:12", false}, {"1.1.1.2:12", true},
}, },
makeServicePortName("ns1", "ep1", "p13"): { makeServicePortName("ns1", "ep1", "p13"): {
{"1.1.1.3:13", false}, {"1.1.1.3:13", false},
{"1.1.1.4:13", false}, {"1.1.1.4:13", true},
}, },
makeServicePortName("ns1", "ep1", "p14"): { makeServicePortName("ns1", "ep1", "p14"): {
{"1.1.1.3:14", false}, {"1.1.1.3:14", false},
{"1.1.1.4:14", false}, {"1.1.1.4:14", true},
}, },
makeServicePortName("ns2", "ep2", "p21"): { makeServicePortName("ns2", "ep2", "p21"): {
{"2.2.2.1:21", false}, {"2.2.2.1:21", false},
{"2.2.2.2:21", false}, {"2.2.2.2:21", true},
}, },
makeServicePortName("ns2", "ep2", "p22"): { makeServicePortName("ns2", "ep2", "p22"): {
{"2.2.2.1:22", false}, {"2.2.2.1:22", false},
{"2.2.2.2:22", false}, {"2.2.2.2:22", true},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 2,
makeNSN("ns2", "ep2"): 1,
},
}, { }, {
// Case[6]: add an Endpoints // Case[6]: add an Endpoints
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{ ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.1", IP: "1.1.1.1",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Port: 11, Port: 11,
@ -1667,16 +1664,19 @@ func Test_updateEndpoints(t *testing.T) {
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ /* empty */ }, oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ /* empty */ },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", ""): { makeServicePortName("ns1", "ep1", ""): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, { }, {
// Case[7]: remove an Endpoints // Case[7]: remove an Endpoints
newEndpoints: []api.Endpoints{ /* empty */ }, newEndpoints: []api.Endpoints{ /* empty */ },
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", ""): { makeServicePortName("ns1", "ep1", ""): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", true},
}, },
}, },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
@ -1684,6 +1684,7 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "1.1.1.1:11", endpoint: "1.1.1.1:11",
servicePortName: makeServicePortName("ns1", "ep1", ""), servicePortName: makeServicePortName("ns1", "ep1", ""),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[8]: add an IP and port // Case[8]: add an IP and port
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1692,7 +1693,8 @@ func Test_updateEndpoints(t *testing.T) {
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "1.1.1.1", IP: "1.1.1.1",
}, { }, {
IP: "1.1.1.2", IP: "1.1.1.2",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p11", Name: "p11",
@ -1712,14 +1714,17 @@ func Test_updateEndpoints(t *testing.T) {
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11"): { makeServicePortName("ns1", "ep1", "p11"): {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
{"1.1.1.2:11", false}, {"1.1.1.2:11", true},
}, },
makeServicePortName("ns1", "ep1", "p12"): { makeServicePortName("ns1", "ep1", "p12"): {
{"1.1.1.1:12", false}, {"1.1.1.1:12", false},
{"1.1.1.2:12", false}, {"1.1.1.2:12", true},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, { }, {
// Case[9]: remove an IP and port // Case[9]: remove an IP and port
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1760,6 +1765,7 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "1.1.1.2:12", endpoint: "1.1.1.2:12",
servicePortName: makeServicePortName("ns1", "ep1", "p12"), servicePortName: makeServicePortName("ns1", "ep1", "p12"),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[10]: add a subset // Case[10]: add a subset
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1774,7 +1780,8 @@ func Test_updateEndpoints(t *testing.T) {
}}, }},
}, { }, {
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "2.2.2.2", IP: "2.2.2.2",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p22", Name: "p22",
@ -1793,10 +1800,13 @@ func Test_updateEndpoints(t *testing.T) {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
}, },
makeServicePortName("ns1", "ep1", "p22"): { makeServicePortName("ns1", "ep1", "p22"): {
{"2.2.2.2:22", false}, {"2.2.2.2:22", true},
}, },
}, },
expectedStale: []endpointServicePair{}, expectedStale: []endpointServicePair{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, { }, {
// Case[11]: remove a subset // Case[11]: remove a subset
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1829,6 +1839,7 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "2.2.2.2:22", endpoint: "2.2.2.2:22",
servicePortName: makeServicePortName("ns1", "ep1", "p22"), servicePortName: makeServicePortName("ns1", "ep1", "p22"),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[12]: rename a port // Case[12]: rename a port
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1858,6 +1869,7 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "1.1.1.1:11", endpoint: "1.1.1.1:11",
servicePortName: makeServicePortName("ns1", "ep1", "p11"), servicePortName: makeServicePortName("ns1", "ep1", "p11"),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[13]: renumber a port // Case[13]: renumber a port
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1887,6 +1899,7 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "1.1.1.1:11", endpoint: "1.1.1.1:11",
servicePortName: makeServicePortName("ns1", "ep1", "p11"), servicePortName: makeServicePortName("ns1", "ep1", "p11"),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{},
}, { }, {
// Case[14]: complex add and remove // Case[14]: complex add and remove
newEndpoints: []api.Endpoints{ newEndpoints: []api.Endpoints{
@ -1928,7 +1941,8 @@ func Test_updateEndpoints(t *testing.T) {
makeTestEndpoints("ns4", "ep4", func(ept *api.Endpoints) { makeTestEndpoints("ns4", "ep4", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{ ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{ Addresses: []api.EndpointAddress{{
IP: "4.4.4.4", IP: "4.4.4.4",
NodeName: &nodeName,
}}, }},
Ports: []api.EndpointPort{{ Ports: []api.EndpointPort{{
Name: "p44", Name: "p44",
@ -1942,18 +1956,18 @@ func Test_updateEndpoints(t *testing.T) {
{"1.1.1.1:11", false}, {"1.1.1.1:11", false},
}, },
makeServicePortName("ns2", "ep2", "p22"): { makeServicePortName("ns2", "ep2", "p22"): {
{"2.2.2.2:22", false}, {"2.2.2.2:22", true},
{"2.2.2.22:22", false}, {"2.2.2.22:22", true},
}, },
makeServicePortName("ns2", "ep2", "p23"): { makeServicePortName("ns2", "ep2", "p23"): {
{"2.2.2.3:23", false}, {"2.2.2.3:23", true},
}, },
makeServicePortName("ns4", "ep4", "p44"): { makeServicePortName("ns4", "ep4", "p44"): {
{"4.4.4.4:44", false}, {"4.4.4.4:44", true},
{"4.4.4.5:44", false}, {"4.4.4.5:44", true},
}, },
makeServicePortName("ns4", "ep4", "p45"): { makeServicePortName("ns4", "ep4", "p45"): {
{"4.4.4.6:45", false}, {"4.4.4.6:45", true},
}, },
}, },
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
@ -1971,7 +1985,7 @@ func Test_updateEndpoints(t *testing.T) {
{"3.3.3.3:33", false}, {"3.3.3.3:33", false},
}, },
makeServicePortName("ns4", "ep4", "p44"): { makeServicePortName("ns4", "ep4", "p44"): {
{"4.4.4.4:44", false}, {"4.4.4.4:44", true},
}, },
}, },
expectedStale: []endpointServicePair{{ expectedStale: []endpointServicePair{{
@ -1990,10 +2004,13 @@ func Test_updateEndpoints(t *testing.T) {
endpoint: "4.4.4.6:45", endpoint: "4.4.4.6:45",
servicePortName: makeServicePortName("ns4", "ep4", "p45"), servicePortName: makeServicePortName("ns4", "ep4", "p45"),
}}, }},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns4", "ep4"): 1,
},
}} }}
for tci, tc := range testCases { for tci, tc := range testCases {
newMap, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, "host", fakeHealthChecker{}) newMap, hcEndpoints, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, nodeName)
if len(newMap) != len(tc.expectedResult) { if len(newMap) != len(tc.expectedResult) {
t.Errorf("[%d] expected %d results, got %d: %v", tci, len(tc.expectedResult), len(newMap), newMap) t.Errorf("[%d] expected %d results, got %d: %v", tci, len(tc.expectedResult), len(newMap), newMap)
} }
@ -2016,6 +2033,9 @@ func Test_updateEndpoints(t *testing.T) {
t.Errorf("[%d] expected stale[%v], but didn't find it: %v", tci, x, stale) t.Errorf("[%d] expected stale[%v], but didn't find it: %v", tci, x, stale)
} }
} }
if !reflect.DeepEqual(hcEndpoints, tc.expectedHealthchecks) {
t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, hcEndpoints)
}
} }
} }
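The new hcEndpoints return value compared at the end of this test counts, per service, how many distinct endpoint IPs are scheduled on the proxier's own node (NodeName equal to its hostname); that is the number the health-check server reports for only-local services. A small stand-alone sketch of that counting, with simplified types in place of api.Endpoints.

package main

import "fmt"

// NamespacedName mirrors k8s.io/apimachinery/pkg/types.NamespacedName.
type NamespacedName struct{ Namespace, Name string }

// endpointAddress is a pared-down stand-in for api.EndpointAddress.
type endpointAddress struct {
	IP       string
	NodeName string // empty when the API object carries no node name
}

// localEndpointCounts returns, per service, the number of distinct endpoint
// IPs scheduled on hostname — the figure expected in expectedHealthchecks.
func localEndpointCounts(eps map[NamespacedName][]endpointAddress, hostname string) map[NamespacedName]int {
	out := map[NamespacedName]int{}
	for nsn, addrs := range eps {
		seen := map[string]bool{}
		for _, a := range addrs {
			if a.NodeName == hostname && !seen[a.IP] {
				seen[a.IP] = true
				out[nsn]++
			}
		}
	}
	return out
}

func main() {
	eps := map[NamespacedName][]endpointAddress{
		{Namespace: "ns1", Name: "ep1"}: {{"1.1.1.1", ""}, {"1.1.1.2", "host"}, {"1.1.1.4", "host"}},
		{Namespace: "ns2", Name: "ep2"}: {{"2.2.2.2", "host"}},
	}
	fmt.Println(localEndpointCounts(eps, "host")) // ns1/ep1 -> 2, ns2/ep2 -> 1
}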

View file

@ -55,6 +55,8 @@ import (
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
) )
const PostStartHookName = "rbac/bootstrap-roles"
type RESTStorageProvider struct { type RESTStorageProvider struct {
Authorizer authorizer.Authorizer Authorizer authorizer.Authorizer
} }
@ -123,7 +125,7 @@ func (p RESTStorageProvider) storage(version schema.GroupVersion, apiResourceCon
} }
func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) { func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) {
return "rbac/bootstrap-roles", PostStartHook, nil return PostStartHookName, PostStartHook, nil
} }
func PostStartHook(hookContext genericapiserver.PostStartHookContext) error { func PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
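Exporting the hook name as PostStartHookName lets other packages reference "rbac/bootstrap-roles" without repeating the string literal. A tiny, self-contained sketch of a hypothetical call site; the wait-for-hooks wiring shown here is an assumption for illustration, not an API introduced by this diff.

package main

import "fmt"

// PostStartHookName mirrors the constant exported above so callers can
// refer to the hook by name instead of re-typing the string literal.
const PostStartHookName = "rbac/bootstrap-roles"

func main() {
	// e.g. a server option listing hooks that must finish before readiness.
	waitFor := []string{PostStartHookName}
	fmt.Println("waiting for post-start hooks:", waitFor)
}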

View file

@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no // semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight // longer the direct output of "git describe", but a slight
// translation to be semver compliant. // translation to be semver compliant.
gitVersion string = "v1.6.1+$Format:%h$" gitVersion string = "v1.6.4+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"

View file

@ -151,7 +151,7 @@ func (plugin *photonPersistentDiskPlugin) ConstructVolumeSpec(volumeSpecName, mo
// Abstract interface to disk operations. // Abstract interface to disk operations.
type pdManager interface { type pdManager interface {
// Creates a volume // Creates a volume
CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error) CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error)
// Deletes a volume // Deletes a volume
DeleteVolume(deleter *photonPersistentDiskDeleter) error DeleteVolume(deleter *photonPersistentDiskDeleter) error
} }
@ -342,11 +342,15 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
} }
func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) { func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
pdID, sizeGB, err := p.manager.CreateVolume(p) pdID, sizeGB, fstype, err := p.manager.CreateVolume(p)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if fstype == "" {
fstype = "ext4"
}
pv := &v1.PersistentVolume{ pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: p.options.PVName, Name: p.options.PVName,
@ -364,7 +368,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, err
PersistentVolumeSource: v1.PersistentVolumeSource{ PersistentVolumeSource: v1.PersistentVolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{ PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: pdID, PdID: pdID,
FSType: "ext4", FSType: fstype,
}, },
}, },
}, },
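Provision now threads the fstype reported by CreateVolume into the PersistentVolume source and falls back to ext4 when the manager returns an empty string. A minimal sketch of that defaulting step in isolation.

package main

import "fmt"

// fsTypeOrDefault reproduces the fallback used in Provision above: an empty
// fstype from the volume manager becomes "ext4".
func fsTypeOrDefault(fstype string) string {
	if fstype == "" {
		return "ext4"
	}
	return fstype
}

func main() {
	fmt.Println(fsTypeOrDefault(""))    // ext4
	fmt.Println(fsTypeOrDefault("xfs")) // xfs
}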

View file

@ -88,8 +88,8 @@ func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAcc
type fakePDManager struct { type fakePDManager struct {
} }
func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error) { func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error) {
return "test-photon-pd-id", 10, nil return "test-photon-pd-id", 10, "ext4", nil
} }
func (fake *fakePDManager) DeleteVolume(cd *photonPersistentDiskDeleter) error { func (fake *fakePDManager) DeleteVolume(cd *photonPersistentDiskDeleter) error {

View file

@ -80,11 +80,11 @@ func verifyDevicePath(path string) (string, error) {
} }
// CreateVolume creates a PhotonController persistent disk. // CreateVolume creates a PhotonController persistent disk.
func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, err error) { func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, fstype string, err error) {
cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider()) cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
if err != nil { if err != nil {
glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err) glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err)
return "", 0, err return "", 0, "", err
} }
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
@ -102,20 +102,23 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
switch strings.ToLower(parameter) { switch strings.ToLower(parameter) {
case "flavor": case "flavor":
volumeOptions.Flavor = value volumeOptions.Flavor = value
case "fstype":
fstype = value
glog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype)
default: default:
glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
return "", 0, fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) return "", 0, "", fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
} }
} }
pdID, err = cloud.CreateDisk(volumeOptions) pdID, err = cloud.CreateDisk(volumeOptions)
if err != nil { if err != nil {
glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err) glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err)
return "", 0, err return "", 0, "", err
} }
glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name)
return pdID, volSizeGB, nil return pdID, volSizeGB, fstype, nil
} }
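CreateVolume now accepts an fstype key alongside flavor in the StorageClass parameters and rejects anything else. A self-contained sketch of that parsing loop; the parameter names match the switch above, while the function and struct names are illustrative.

package main

import (
	"fmt"
	"strings"
)

// photonVolumeParams is the subset of options this sketch extracts from a
// StorageClass parameters map.
type photonVolumeParams struct {
	Flavor string
	FSType string
}

// parsePhotonParams mirrors the switch in CreateVolume: known keys are
// captured, any unknown key fails the provisioning request.
func parsePhotonParams(params map[string]string) (photonVolumeParams, error) {
	var out photonVolumeParams
	for k, v := range params {
		switch strings.ToLower(k) {
		case "flavor":
			out.Flavor = v
		case "fstype":
			out.FSType = v
		default:
			return photonVolumeParams{}, fmt.Errorf("invalid option %q for photon volume plugin", k)
		}
	}
	return out, nil
}

func main() {
	p, err := parsePhotonParams(map[string]string{"flavor": "persistent-disk", "fstype": "xfs"})
	fmt.Println(p, err) // {persistent-disk xfs} <nil>
}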
// DeleteVolume deletes a Photon Controller persistent disk. // DeleteVolume deletes a Photon Controller persistent disk.

Some files were not shown because too many files have changed in this diff.