Upgrade k8s to v1.6.4

Signed-off-by: Antonio Murdaca <runcom@redhat.com>

parent d09f928fac · commit 6395319aef
140 changed files with 3568 additions and 1301 deletions
lock.json (16 changes)

@@ -1,5 +1,5 @@
 {
-    "memo": "c0b6d8274a7286341387dd0a060d5f1c171bf8658cc76602c6e026880a951a08",
+    "memo": "88a7bc044db73c7ab2adc009d3780db88e39e36cbb96e0f1f4b0636929481543",
     "projects": [
         {
             "name": "cloud.google.com/go",
@@ -176,7 +176,8 @@
             "revision": "48702e0da86bd25e76cfef347e2adeb434a0d0a6",
             "packages": [
                 "daemon",
-                "dbus"
+                "dbus",
+                "util"
             ]
         },
         {
@@ -184,6 +185,7 @@
             "version": "v3",
             "revision": "3ac0863d7acf3bc44daf49afef8919af12f704ef",
             "packages": [
                 "dlopen",
+                "health",
                 "httputil",
                 "timeutil"
@@ -523,10 +525,14 @@
             "revision": "b263a43430ac6996a4302b891688544225197294",
             "packages": [
                 "libcontainer/apparmor",
                 "libcontainer/cgroups",
                 "libcontainer/cgroups/fs",
                 "libcontainer/cgroups/systemd",
                 "libcontainer/configs",
+                "libcontainer/devices",
+                "libcontainer/system",
-                "libcontainer/user"
+                "libcontainer/user",
+                "libcontainer/utils"
             ]
         },
         {
@@ -840,8 +846,8 @@
         },
         {
             "name": "k8s.io/kubernetes",
-            "version": "v1.6.1",
-            "revision": "b0b7a323cc5a4a2019b2e9520c21c7830b7f708e",
+            "version": "v1.6.4",
+            "revision": "d6f433224538d4f9ca2f7ae19b252e6fcb66a3ae",
             "packages": [
                 "pkg/api",
                 "pkg/api/install",
[file header missing]

@@ -9,9 +9,6 @@
     "github.com/containers/storage": {
         "branch": "master"
     },
-    "github.com/opencontainers/image-spec": {
-        "version": "v1.0.0-rc5"
-    },
     "github.com/docker/distribution": {
         "branch": "master"
     },
@@ -21,6 +18,9 @@
     "github.com/godbus/dbus": {
         "version": "^4.0.0"
    },
+    "github.com/opencontainers/image-spec": {
+        "version": "v1.0.0-rc5"
+    },
     "github.com/opencontainers/runc": {
         "branch": "master"
     },
@@ -36,11 +36,11 @@
     "google.golang.org/grpc": {
         "version": "v1.0.1-GA"
     },
-    "k8s.io/kubernetes": {
-        "version": "v1.6.1"
-    },
     "k8s.io/apimachinery": {
         "revision": "ae33df8bd0294deb6f1853cc107816dd181e0146"
     },
+    "k8s.io/kubernetes": {
+        "version": "~v1.6.1"
+    }
     }
 }
vendor/k8s.io/kubernetes/WORKSPACE (16 changes, generated, vendored)

@@ -24,33 +24,39 @@ go_repositories()
 debs = (
     (
         "busybox_deb",
-        "7465567f5e5255188b1d004d7081066cd79f77a5c18a5d418d27966d698e0bef",
-        "http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b2_amd64.deb",
+        "5f81f140777454e71b9e5bfdce9c89993de5ddf4a7295ea1cfda364f8f630947",
+        "http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b3_amd64.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/busybox-static_1.22.0-19+b3_amd64.deb",
     ),
     (
         "libc_deb",
-        "6bbd506b171a9f29b09fde77e2749c0aa0c1439058df9d1a6408d464069b7dd6",
-        "http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-9_amd64.deb",
+        "372aac4a9ce9dbb26a08de0b9c41b0500ba019430295d29f39566483f5f32732",
+        "http://ftp.us.debian.org/debian/pool/main/g/glibc/libc6_2.24-10_amd64.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/libc6_2.24-10_amd64.deb",
     ),
     (
         "iptables_deb",
         "7747388a97ba71fede302d70361c81d486770a2024185514c18b5d8eab6aaf4e",
         "http://ftp.us.debian.org/debian/pool/main/i/iptables/iptables_1.4.21-2+b1_amd64.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/iptables_1.4.21-2+b1_amd64.deb",
     ),
     (
         "libnetlink_deb",
         "5d486022cd9e047e9afbb1617cf4519c0decfc3d2c1fad7e7fe5604943dbbf37",
         "http://ftp.us.debian.org/debian/pool/main/libn/libnfnetlink/libnfnetlink0_1.0.1-3_amd64.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/libnfnetlink0_1.0.1-3_amd64.deb",
     ),
     (
         "libxtables_deb",
         "6783f316af4cbf3ada8b9a2b7bb5f53a87c0c2575c1903ce371fdbd45d3626c6",
         "http://ftp.us.debian.org/debian/pool/main/i/iptables/libxtables10_1.4.21-2+b1_amd64.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/libxtables10_1.4.21-2+b1_amd64.deb",
     ),
     (
         "iproute2_deb",
         "3ce9cb1d03a2a1359cbdd4f863b15d0c906096bf713e8eb688149da2f4e350bc",
         "http://ftp.us.debian.org/debian/pool/main/i/iproute2/iproute_3.16.0-2_all.deb",
+        "https://storage.googleapis.com/kubernetes-release/debs/iproute_3.16.0-2_all.deb",
     ),
 )
@@ -58,7 +64,7 @@ debs = (
     name = name,
     sha256 = sha256,
     url = url,
-) for name, sha256, url in debs]
+) for name, sha256, origin, url in debs]

 http_file(
     name = "kubernetes_cni",
vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json (4 changes, generated, vendored)

@@ -2,7 +2,7 @@
     "swagger": "2.0",
     "info": {
         "title": "Kubernetes",
-        "version": "v1.6.1"
+        "version": "v1.6.4"
     },
     "paths": {
         "/api/": {
@@ -43262,7 +43262,7 @@
     ],
     "properties": {
         "concurrencyPolicy": {
-            "description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
+            "description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
             "type": "string"
         },
         "failedJobsHistoryLimit": {
vendor/k8s.io/kubernetes/cluster/addons/dns/kubedns-sa.yaml (1 change, generated, vendored)

@@ -2,6 +2,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
   namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml (9 changes, generated, vendored)

@@ -79,9 +79,6 @@ data:
         type counter
         name logging_line_count
         desc Total number of lines generated by application containers
-        <labels>
-          tag ${tag}
-        </labels>
       </metric>
     </store>
     <store>
@@ -342,9 +339,8 @@ data:
       <metric>
         type counter
         name logging_entry_count
-        desc Total number of log entries generated by either an application container or a system component
+        desc Total number of log entries generated by either application containers or system components
        <labels>
-          tag ${tag}
          component container
        </labels>
      </metric>
@@ -376,9 +372,8 @@ data:
      <metric>
        type counter
        name logging_entry_count
-        desc Total number of log entries generated by either an application container or a system component
+        desc Total number of log entries generated by either application containers or system components
        <labels>
-          tag ${tag}
          component system
        </labels>
      </metric>
vendor/k8s.io/kubernetes/cluster/common.sh (14 changes, generated, vendored)

@@ -1,6 +1,6 @@
 #!/bin/bash

-# Copyright 2015 The Kubernetes Authors.
+# Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -461,8 +461,8 @@ function find-release-tars() {

   # This tarball is used by GCI, Ubuntu Trusty, and Container Linux.
   KUBE_MANIFESTS_TAR=
-  if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" ]] || \
-     [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" ]] ; then
+  if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \
+     [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then
     KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz)
   fi
 }
@@ -598,7 +598,9 @@ function build-kube-env {
   local salt_tar_url=$SALT_TAR_URL
   local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
   if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \
-     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] ; then
+     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \
+     [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
+     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
     # TODO: Support fallback .tar.gz settings on Container Linux
     server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
     salt_tar_url=$(split_csv "${SALT_TAR_URL}")
@@ -682,8 +684,8 @@ EOF
 TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
 EOF
   fi
-  if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") ]] || \
-     [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") ]] ; then
+  if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
+     [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
     cat >>$file <<EOF
 KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
 KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
vendor/k8s.io/kubernetes/cluster/gce/gci/configure-helper.sh (3 changes, generated, vendored)

@@ -999,6 +999,9 @@ function start-kube-apiserver {
     params+=" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}"
     params+=" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}"
   fi
+  if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]]; then
+    params+=" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}"
+  fi
   params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
   if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
     params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
vendor/k8s.io/kubernetes/cluster/gce/gci/master.yaml (4 changes, generated, vendored)

@@ -112,6 +112,9 @@ write_files:
       [Unit]
       Description=Kubernetes
+
+      [Install]
+      WantedBy=multi-user.target

 runcmd:
   - systemctl daemon-reload
   - systemctl enable kube-master-installation.service
@@ -120,4 +123,5 @@ runcmd:
   - systemctl enable kubelet-monitor.service
   - systemctl enable kube-logrotate.timer
   - systemctl enable kube-logrotate.service
+  - systemctl enable kubernetes.target
   - systemctl start kubernetes.target
vendor/k8s.io/kubernetes/cluster/gce/gci/node.yaml (4 changes, generated, vendored)

@@ -112,6 +112,9 @@ write_files:
       [Unit]
       Description=Kubernetes
+
+      [Install]
+      WantedBy=multi-user.target

 runcmd:
   - systemctl daemon-reload
   - systemctl enable kube-node-installation.service
@@ -120,4 +123,5 @@ runcmd:
   - systemctl enable kubelet-monitor.service
   - systemctl enable kube-logrotate.timer
   - systemctl enable kube-logrotate.service
+  - systemctl enable kubernetes.target
   - systemctl start kubernetes.target
vendor/k8s.io/kubernetes/cluster/gce/util.sh (10 changes, generated, vendored)

@@ -1,6 +1,6 @@
 #!/bin/bash

-# Copyright 2014 The Kubernetes Authors.
+# Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,14 +23,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
 source "${KUBE_ROOT}/cluster/common.sh"
 source "${KUBE_ROOT}/cluster/lib/util.sh"

-if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
+if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
   source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
 else
   echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
   exit 1
 fi

-if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
+if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
   source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
 else
   echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
@@ -592,8 +592,8 @@ function kube-up() {
     parse-master-env
     create-nodes
   elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
-    if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
-      echo "Master replication supported only for gci and debian"
+    if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "debian" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
+      echo "Master replication supported only for gci, debian, and ubuntu"
       return 1
     fi
     create-loadbalancer
vendor/k8s.io/kubernetes/cluster/gke/config-test.sh (1 change, generated, vendored)

@@ -19,6 +19,7 @@ CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}"
 NETWORK=${KUBE_GKE_NETWORK:-e2e}
 NODE_TAG="k8s-${CLUSTER_NAME}-node"
 IMAGE_TYPE="${KUBE_GKE_IMAGE_TYPE:-container_vm}"
+ENABLE_KUBERNETES_ALPHA="${KUBE_GKE_ENABLE_KUBERNETES_ALPHA:-}"

 KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
vendor/k8s.io/kubernetes/cluster/gke/util.sh (5 changes, generated, vendored)

@@ -135,6 +135,7 @@ function validate-cluster {
 #   HEAPSTER_MACHINE_TYPE (optional)
 #   CLUSTER_IP_RANGE (optional)
 #   GKE_CREATE_FLAGS (optional, space delineated)
+#   ENABLE_KUBERNETES_ALPHA (optional)
 function kube-up() {
   echo "... in gke:kube-up()" >&2
   detect-project >&2
@@ -184,6 +185,10 @@ function kube-up() {
     "--machine-type=${MACHINE_TYPE}"
   )

+  if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then
+    create_args+=("--enable-kubernetes-alpha")
+  fi
+
   if [[ ! -z "${ADDITIONAL_ZONES:-}" ]]; then
     create_args+=("--additional-zones=${ADDITIONAL_ZONES}")
   fi
[file header missing]

@@ -25,12 +25,12 @@
     "containers": [
       {
         "name": "cluster-autoscaler",
-        "image": "gcr.io/google_containers/cluster-autoscaler:v0.5.1",
+        "image": "gcr.io/google_containers/cluster-autoscaler:v0.5.4",
         "command": [
           "./run.sh",
           "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
           "--v=4",
           "--stderrthreshold=info",
           "--logtostderr=true",
           "--write-status-configmap=true",
           "{{params}}"
         ],
vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest (8 changes, generated, vendored)

@@ -1,18 +1,18 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: l7-lb-controller-v0.9.2
+  name: l7-lb-controller-v0.9.3
   namespace: kube-system
   labels:
     k8s-app: glbc
-    version: v0.9.2
+    version: v0.9.3
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "GLBC"
 spec:
   terminationGracePeriodSeconds: 600
   hostNetwork: true
   containers:
-  - image: gcr.io/google_containers/glbc:0.9.2
+  - image: gcr.io/google_containers/glbc:0.9.3
     livenessProbe:
       httpGet:
         path: /healthz
@@ -43,7 +43,7 @@ spec:
     # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
     - sh
     - -c
-    - '/glbc --verbose=true --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
+    - '/glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
 volumes:
   - hostPath:
       path: /etc/gce.conf
vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD (2 changes, generated, vendored)

@@ -31,9 +31,11 @@ go_library(
         "//pkg/kubeapiserver:go_default_library",
         "//pkg/kubeapiserver/admission:go_default_library",
         "//pkg/kubeapiserver/authenticator:go_default_library",
+        "//pkg/kubeapiserver/authorizer/modes:go_default_library",
         "//pkg/master:go_default_library",
         "//pkg/master/tunneler:go_default_library",
         "//pkg/registry/cachesize:go_default_library",
+        "//pkg/registry/rbac/rest:go_default_library",
         "//pkg/version:go_default_library",
         "//plugin/pkg/admission/admit:go_default_library",
         "//plugin/pkg/admission/alwayspullimages:go_default_library",
vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go (5 changes, generated, vendored)

@@ -63,9 +63,11 @@ import (
     "k8s.io/kubernetes/pkg/kubeapiserver"
     kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
     kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
+    "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
     "k8s.io/kubernetes/pkg/master"
     "k8s.io/kubernetes/pkg/master/tunneler"
     "k8s.io/kubernetes/pkg/registry/cachesize"
+    rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
     "k8s.io/kubernetes/pkg/version"
     "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
 )
@@ -324,6 +326,9 @@ func BuildMasterConfig(s *options.ServerRunOptions) (*master.Config, informers.S
     if err != nil {
         return nil, nil, fmt.Errorf("invalid authentication config: %v", err)
     }
+    if !sets.NewString(s.Authorization.Modes()...).Has(modes.ModeRBAC) {
+        genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName)
+    }

     authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)
     apiAuthorizer, err := authorizationConfig.New()
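The new guard above registers the RBAC bootstrap-policy post-start hook only when RBAC is among the configured authorization modes; otherwise the hook is disabled up front. A self-contained sketch of the set-membership test, with a minimal stand-in for `sets.String` from k8s.io/apimachinery (an assumption for illustration; the real type has a much larger API):

```go
package main

import "fmt"

// String is a minimal stand-in for sets.String.
type String map[string]struct{}

// NewString builds a set from its arguments.
func NewString(items ...string) String {
	s := String{}
	for _, item := range items {
		s[item] = struct{}{}
	}
	return s
}

// Has reports whether item is in the set.
func (s String) Has(item string) bool { _, ok := s[item]; return ok }

func main() {
	modes := []string{"Node", "ABAC"} // RBAC not enabled
	if !NewString(modes...).Has("RBAC") {
		fmt.Println("would disable the RBAC post-start hook")
	}
}
```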
vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go (16 changes, generated, vendored)

@@ -26,14 +26,12 @@ import (
 const (
     DefaultServiceDNSDomain = "cluster.local"
     DefaultServicesSubnet   = "10.96.0.0/12"
-    DefaultKubernetesVersion = "latest-1.6"
-    // This is only for clusters without internet, were the latest stable version can't be determined
-    DefaultKubernetesFallbackVersion = "v1.6.0"
-    DefaultAPIBindPort               = 6443
-    DefaultDiscoveryBindPort         = 9898
-    DefaultAuthorizationMode         = "RBAC"
-    DefaultCACertPath                = "/etc/kubernetes/pki/ca.crt"
-    DefaultCertificatesDir           = "/etc/kubernetes/pki"
+    DefaultKubernetesVersion = "stable-1.6"
+    DefaultAPIBindPort       = 6443
+    DefaultDiscoveryBindPort = 9898
+    DefaultAuthorizationMode = "RBAC"
+    DefaultCACertPath        = "/etc/kubernetes/pki/ca.crt"
+    DefaultCertificatesDir   = "/etc/kubernetes/pki"
 )

 func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -46,7 +44,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {

 func SetDefaults_MasterConfiguration(obj *MasterConfiguration) {
     if obj.KubernetesVersion == "" {
-        obj.KubernetesVersion = DefaultKubernetesFallbackVersion
+        obj.KubernetesVersion = DefaultKubernetesVersion
     }

     if obj.API.BindPort == 0 {
vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go (7 changes, generated, vendored)

@@ -22,7 +22,6 @@ import (

     netutil "k8s.io/apimachinery/pkg/util/net"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-    kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
     kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
     tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token"
@@ -45,11 +44,7 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error {
     // Validate version argument
     ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
     if err != nil {
-        if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
-            return err
-        } else {
-            ver = kubeadmapiext.DefaultKubernetesFallbackVersion
-        }
+        return err
     }
     cfg.KubernetesVersion = ver
vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go (6 changes, generated, vendored)

@@ -105,16 +105,16 @@ func (r *Reset) Run(out io.Writer) error {
     }

     dockerCheck := preflight.ServiceCheck{Service: "docker", CheckIfActive: true}
-    if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 {
+    if _, errors := dockerCheck.Check(); len(errors) == 0 {
         fmt.Println("[reset] Removing kubernetes-managed containers")
-        if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil {
+        if err := exec.Command("sh", "-c", "docker ps -a --filter name=k8s_ -q | xargs -r docker rm --force --volumes").Run(); err != nil {
             fmt.Println("[reset] Failed to stop the running containers")
         }
     } else {
         fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers")
     }

-    dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}
+    dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim"}

     // Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
     // provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
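The container cleanup in `kubeadm reset` now lists exited containers too (`docker ps -a`) and lets docker itself match names with `--filter name=k8s_`, rather than grepping the table output, and `/var/lib/dockershim` joins the directories to wipe. A self-contained sketch of the new invocation, assuming a docker CLI on PATH (error handling trimmed to the essentials):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// -a includes exited containers; --filter name=k8s_ replaces the old
	// `docker ps | grep 'k8s_' | awk '{print $1}'` pipeline; -q prints IDs only.
	cleanup := "docker ps -a --filter name=k8s_ -q | xargs -r docker rm --force --volumes"
	if err := exec.Command("sh", "-c", cleanup).Run(); err != nil {
		fmt.Println("[reset] Failed to stop the running containers")
	}
}
```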
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go (2 changes, generated, vendored)

@@ -61,7 +61,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error

     // The master node is tainted and labelled accordingly
     n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
-    n.Spec.Taints = []v1.Taint{{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}}
+    n.Spec.Taints = append(n.Spec.Taints, v1.Taint{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"})

     newData, err := json.Marshal(n)
     if err != nil {
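The one-line fix above switches from overwriting `n.Spec.Taints` to appending, so taints already present on the node survive master setup. A minimal sketch of the difference, using a stand-in Taint type rather than the real `v1.Taint`:

```go
package main

import "fmt"

// Taint is a stand-in for k8s.io's v1.Taint, for illustration only.
type Taint struct{ Key, Value, Effect string }

func main() {
	existing := []Taint{{Key: "example.com/user-taint", Effect: "NoExecute"}}

	// Old behaviour: a fresh slice drops the user's taint.
	overwritten := []Taint{{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"}}

	// New behaviour: append keeps both taints.
	appended := append(existing, Taint{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"})

	fmt.Println(len(overwritten), len(appended)) // 1 2
}
```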
vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md (2 changes, generated, vendored)

@@ -61,9 +61,11 @@ metadata:
 provisioner: kubernetes.io/vsphere-volume
 parameters:
     diskformat: eagerzeroedthick
+    fstype: ext3
 ```

 * `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`.
+* `fstype`: fstype that are supported by kubernetes. Default: `"ext4"`.

 #### Portworx Volume
vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md (307 changes, generated, vendored)

@@ -5,6 +5,8 @@
   - [Volumes](#volumes)
   - [Persistent Volumes](#persistent-volumes)
   - [Storage Class](#storage-class)
+  - [Virtual SAN policy support inside Kubernetes](#virtual-san-policy-support-inside-kubernetes)
+  - [Stateful Set](#stateful-set)

 ## Prerequisites
@@ -212,6 +214,7 @@
 provisioner: kubernetes.io/vsphere-volume
 parameters:
     diskformat: zeroedthick
+    fstype: ext3
 ```

 [Download example](vsphere-volume-sc-fast.yaml?raw=true)
@@ -243,10 +246,11 @@
 ``` bash
 $ kubectl describe storageclass fast
-Name:           fast
-Annotations:    <none>
-Provisioner:    kubernetes.io/vsphere-volume
-Parameters:     diskformat=zeroedthick
+Name:            fast
+IsDefaultClass:  No
+Annotations:     <none>
+Provisioner:     kubernetes.io/vsphere-volume
+Parameters:      diskformat=zeroedthick,fstype=ext3
 No events.
 ```
@@ -281,14 +285,19 @@
 ``` bash
 $ kubectl describe pvc pvcsc001
-Name:           pvcsc001
-Namespace:      default
-Status:         Bound
-Volume:         pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
-Labels:         <none>
-Capacity:       2Gi
-Access Modes:   RWO
-No events.
+Name:          pvcsc001
+Namespace:     default
+StorageClass:  fast
+Status:        Bound
+Volume:        pvc-83295256-f8e0-11e6-8263-005056b2349c
+Labels:        <none>
+Capacity:      2Gi
+Access Modes:  RWO
+Events:
+  FirstSeen  LastSeen  Count  From                         SubObjectPath  Type    Reason                 Message
+  ---------  --------  -----  ----                         -------------  ------  ------                 -------
+  1m         1m        1      persistentvolume-controller                 Normal  ProvisioningSucceeded  Successfully provisioned volume pvc-83295256-f8e0-11e6-8263-005056b2349c using kubernetes.io/vsphere-volume
+
 ```

 Persistent Volume is automatically created and is bounded to this pvc.
@@ -296,19 +305,20 @@
 Verifying persistent volume claim is created:

 ``` bash
-$ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
-Name:            pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
-Labels:          <none>
-Status:          Bound
-Claim:           default/pvcsc001
-Reclaim Policy:  Delete
-Access Modes:    RWO
-Capacity:        2Gi
+$ kubectl describe pv pvc-83295256-f8e0-11e6-8263-005056b2349c
+Name:            pvc-83295256-f8e0-11e6-8263-005056b2349c
+Labels:          <none>
+StorageClass:    fast
+Status:          Bound
+Claim:           default/pvcsc001
+Reclaim Policy:  Delete
+Access Modes:    RWO
+Capacity:        2Gi
 Message:
 Source:
-    Type:        vSphereVolume (a Persistent Disk resource in vSphere)
-    VolumePath:  [datastore1] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk
-    FSType:      ext4
+    Type:        vSphereVolume (a Persistent Disk resource in vSphere)
+    VolumePath:  [datastore1] kubevols/kubernetes-dynamic-pvc-83295256-f8e0-11e6-8263-005056b2349c.vmdk
+    FSType:      ext3
 No events.
 ```
@@ -353,6 +363,257 @@
 pvpod         1/1     Running     0           48m
 ```

+### Virtual SAN policy support inside Kubernetes
+
+Vsphere Infrastructure(VI) Admins will have the ability to specify custom Virtual SAN Storage Capabilities during dynamic volume provisioning. You can now define storage requirements, such as performance and availability, in the form of storage capabilities during dynamic volume provisioning. The storage capability requirements are converted into a Virtual SAN policy which are then pushed down to the Virtual SAN layer when a persistent volume (virtual disk) is being created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements.
+
+The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN. The user can specify these storage capabilities as part of storage class defintion based on his application needs.
+
+The policy settings can be one or more of the following:
+
+* *hostFailuresToTolerate*: represents NumberOfFailuresToTolerate
+* *diskStripes*: represents NumberofDiskStripesPerObject
+* *objectSpaceReservation*: represents ObjectSpaceReservation
+* *cacheReservation*: represents FlashReadCacheReservation
+* *iopsLimit*: represents IOPSLimitForObject
+* *forceProvisioning*: represents if volume must be Force Provisioned
+
+__Note: Here you don't need to create persistent volume it is created for you.__
+1. Create Storage Class.
+
+   Example 1:
+
+   ```yaml
+   kind: StorageClass
+   apiVersion: storage.k8s.io/v1beta1
+   metadata:
+     name: fast
+   provisioner: kubernetes.io/vsphere-volume
+   parameters:
+       diskformat: zeroedthick
+       hostFailuresToTolerate: "2"
+       cachereservation: "20"
+   ```
+
+   [Download example](vsphere-volume-sc-vsancapabilities.yaml?raw=true)
+
+   Here a persistent volume will be created with the Virtual SAN capabilities - hostFailuresToTolerate to 2 and cachereservation is 20% read cache reserved for storage object. Also the persistent volume will be *zeroedthick* disk.
+   The official [VSAN policy documentation](https://pubs.vmware.com/vsphere-65/index.jsp?topic=%2Fcom.vmware.vsphere.virtualsan.doc%2FGUID-08911FD3-2462-4C1C-AE81-0D4DBC8F7990.html) describes in detail about each of the individual storage capabilities that are supported by VSAN and can be configured on the virtual disk.
+
+   You can also specify the datastore in the Storageclass as shown in example 2. The volume will be created on the datastore specified in the storage class.
+   This field is optional. If not specified as shown in example 1, the volume will be created on the datastore specified in the vsphere config file used to initialize the vSphere Cloud Provider.
+
+   Example 2:
+
+   ```yaml
+   kind: StorageClass
+   apiVersion: storage.k8s.io/v1beta1
+   metadata:
+     name: fast
+   provisioner: kubernetes.io/vsphere-volume
+   parameters:
+       diskformat: zeroedthick
+       datastore: VSANDatastore
+       hostFailuresToTolerate: "2"
+       cachereservation: "20"
+   ```
+
+   [Download example](vsphere-volume-sc-vsancapabilities-with-datastore.yaml?raw=true)
+
+   __Note: If you do not apply a storage policy during dynamic provisioning on a VSAN datastore, it will use a default Virtual SAN policy.__
+
+   Creating the storageclass:
+
+   ``` bash
+   $ kubectl create -f examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml
+   ```
+
+   Verifying storage class is created:
+
+   ``` bash
+   $ kubectl describe storageclass fast
+   Name:           fast
+   Annotations:    <none>
+   Provisioner:    kubernetes.io/vsphere-volume
+   Parameters:     diskformat=zeroedthick, hostFailuresToTolerate="2", cachereservation="20"
+   No events.
+   ```
+
+2. Create Persistent Volume Claim.
+
+   See example:
+
+   ```yaml
+   kind: PersistentVolumeClaim
+   apiVersion: v1
+   metadata:
+     name: pvcsc-vsan
+     annotations:
+       volume.beta.kubernetes.io/storage-class: fast
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 2Gi
+   ```
+
+   [Download example](vsphere-volume-pvcsc.yaml?raw=true)
+
+   Creating the persistent volume claim:
+
+   ``` bash
+   $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcsc.yaml
+   ```
+
+   Verifying persistent volume claim is created:
+
+   ``` bash
+   $ kubectl describe pvc pvcsc-vsan
+   Name:           pvcsc-vsan
+   Namespace:      default
+   Status:         Bound
+   Volume:         pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+   Labels:         <none>
+   Capacity:       2Gi
+   Access Modes:   RWO
+   No events.
+   ```
+
+   Persistent Volume is automatically created and is bounded to this pvc.
+
+   Verifying persistent volume claim is created:
+
+   ``` bash
+   $ kubectl describe pv pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+   Name:            pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d
+   Labels:          <none>
+   Status:          Bound
+   Claim:           default/pvcsc-vsan
+   Reclaim Policy:  Delete
+   Access Modes:    RWO
+   Capacity:        2Gi
+   Message:
+   Source:
+       Type:        vSphereVolume (a Persistent Disk resource in vSphere)
+       VolumePath:  [VSANDatastore] kubevols/kubernetes-dynamic-pvc-80f7b5c1-94b6-11e6-a24f-005056a79d2d.vmdk
+       FSType:      ext4
+   No events.
+   ```
+
+   __Note: VMDK is created inside ```kubevols``` folder in datastore which is mentioned in 'vsphere' cloudprovider configuration.
+   The cloudprovider config is created during setup of Kubernetes cluster on vSphere.__
+
+3. Create Pod which uses Persistent Volume Claim with storage class.
+
+   See example:
+
+   ```yaml
+   apiVersion: v1
+   kind: Pod
+   metadata:
+     name: pvpod
+   spec:
+     containers:
+     - name: test-container
+       image: gcr.io/google_containers/test-webserver
+       volumeMounts:
+       - name: test-volume
+         mountPath: /test
+     volumes:
+     - name: test-volume
+       persistentVolumeClaim:
+         claimName: pvcsc-vsan
+   ```
+
+   [Download example](vsphere-volume-pvcscpod.yaml?raw=true)
+
+   Creating the pod:
+
+   ``` bash
+   $ kubectl create -f examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml
+   ```
+
+   Verifying pod is created:
+
+   ``` bash
+   $ kubectl get pod pvpod
+   NAME      READY     STATUS    RESTARTS   AGE
+   pvpod     1/1       Running   0          48m
+   ```
+
+### Stateful Set
+
+vSphere volumes can be consumed by Stateful Sets.
+
+1. Create a storage class that will be used by the ```volumeClaimTemplates``` of a Stateful Set.
+
+   See example:
+
+   ```yaml
+   kind: StorageClass
+   apiVersion: storage.k8s.io/v1beta1
+   metadata:
+     name: thin-disk
+   provisioner: kubernetes.io/vsphere-volume
+   parameters:
+       diskformat: thin
+   ```
+
+   [Download example](simple-storageclass.yaml)
+
+2. Create a Stateful set that consumes storage from the Storage Class created.
+
+   See example:
+
+   ```yaml
+   ---
+   apiVersion: v1
+   kind: Service
+   metadata:
+     name: nginx
+     labels:
+       app: nginx
+   spec:
+     ports:
+     - port: 80
+       name: web
+     clusterIP: None
+     selector:
+       app: nginx
+   ---
+   apiVersion: apps/v1beta1
+   kind: StatefulSet
+   metadata:
+     name: web
+   spec:
+     serviceName: "nginx"
+     replicas: 14
+     template:
+       metadata:
+         labels:
+           app: nginx
+       spec:
+         containers:
+         - name: nginx
+           image: gcr.io/google_containers/nginx-slim:0.8
+           ports:
+           - containerPort: 80
+             name: web
+           volumeMounts:
+           - name: www
+             mountPath: /usr/share/nginx/html
+     volumeClaimTemplates:
+     - metadata:
+         name: www
+         annotations:
+           volume.beta.kubernetes.io/storage-class: thin-disk
+       spec:
+         accessModes: [ "ReadWriteOnce" ]
+         resources:
+           requests:
+             storage: 1Gi
+   ```
+
+   This will create Persistent Volume Claims for each replica and provision a volume for each claim if an existing volume could be bound to the claim.
+

 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 []()
vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-fast.yaml (1 change, generated, vendored)

@@ -5,3 +5,4 @@ metadata:
 provisioner: kubernetes.io/vsphere-volume
 parameters:
     diskformat: zeroedthick
+    fstype: ext3
vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities-with-datastore.yaml (10 changes, generated, vendored, new file)

@@ -0,0 +1,10 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+    diskformat: zeroedthick
+    datastore: vsanDatastore
+    hostFailuresToTolerate: "2"
+    cachereservation: "20"
vendor/k8s.io/kubernetes/examples/volumes/vsphere/vsphere-volume-sc-vsancapabilities.yaml (9 changes, generated, vendored, new file)

@@ -0,0 +1,9 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: fast
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+    diskformat: zeroedthick
+    hostFailuresToTolerate: "2"
+    cachereservation: "20"
vendor/k8s.io/kubernetes/federation/apis/openapi-spec/swagger.json (2 changes, generated, vendored)

@@ -2,7 +2,7 @@
     "swagger": "2.0",
     "info": {
         "title": "Generic API Server",
-        "version": "v1.6.1"
+        "version": "v1.6.4"
     },
     "paths": {
         "/api/": {
[file header missing]

@@ -169,7 +169,8 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
         },
         func(client kubeclientset.Interface, obj pkgruntime.Object) error {
             configmap := obj.(*apiv1.ConfigMap)
-            err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             return err
         })
@@ -202,13 +203,13 @@ func (configmapcontroller *ConfigMapController) hasFinalizerFunc(obj pkgruntime.
     return false
 }

-// removeFinalizerFunc removes the finalizer from the given objects ObjectMeta. Assumes that the given object is a configmap.
-func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+// removeFinalizerFunc removes the given finalizers from the given objects ObjectMeta. Assumes that the given object is a configmap.
+func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
     configmap := obj.(*apiv1.ConfigMap)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range configmap.ObjectMeta.Finalizers {
-        if string(configmap.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, configmap.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, configmap.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -221,7 +222,7 @@ func (configmapcontroller *ConfigMapController) removeFinalizerFunc(obj pkgrunti
     configmap.ObjectMeta.Finalizers = newFinalizers
     configmap, err := configmapcontroller.federatedApiClient.Core().ConfigMaps(configmap.Namespace).Update(configmap)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from configmap %s: %v", finalizer, configmap.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from configmap %s: %v", finalizers, configmap.Name, err)
     }
     return configmap, nil
 }
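The `orphanDependents := false` pattern above repeats in every federation controller that follows: `DeleteOptions.OrphanDependents` is a `*bool` (nil means "server default"), so a local variable exists purely to take its address, and `false` asks the API server to cascade the delete to dependent objects in the underlying cluster rather than orphan them. A self-contained sketch with a stand-in options struct (the real one is `metav1.DeleteOptions`):

```go
package main

import "fmt"

// DeleteOptions stands in for metav1.DeleteOptions; only the field
// relevant here is modelled.
type DeleteOptions struct {
	OrphanDependents *bool // nil = server default, &false = cascade, &true = orphan
}

func main() {
	orphanDependents := false // Go cannot take the address of a literal
	opts := DeleteOptions{OrphanDependents: &orphanDependents}
	fmt.Println(*opts.OrphanDependents) // false
}
```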
[file header missing]

@@ -182,7 +182,8 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
         func(client kubeclientset.Interface, obj pkgruntime.Object) error {
             daemonset := obj.(*extensionsv1.DaemonSet)
             glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
-            err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             if err != nil {
                 glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
             } else {
@@ -220,14 +221,14 @@ func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime.
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a daemonset.
-func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
     daemonset := obj.(*extensionsv1.DaemonSet)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range daemonset.ObjectMeta.Finalizers {
-        if string(daemonset.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, daemonset.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, daemonset.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -240,7 +241,7 @@ func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgrunti
     daemonset.ObjectMeta.Finalizers = newFinalizers
     daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from daemonset %s: %v", finalizer, daemonset.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from daemonset %s: %v", finalizers, daemonset.Name, err)
     }
     return daemonset, nil
 }
[file header missing]

@@ -201,7 +201,8 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen
         },
         func(client kubeclientset.Interface, obj runtime.Object) error {
             rs := obj.(*extensionsv1.Deployment)
-            err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             return err
         })
@@ -234,14 +235,14 @@ func (fdc *DeploymentController) hasFinalizerFunc(obj runtime.Object, finalizer
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a deployment.
-func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
     deployment := obj.(*extensionsv1.Deployment)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range deployment.ObjectMeta.Finalizers {
-        if string(deployment.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, deployment.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, deployment.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -254,7 +255,7 @@ func (fdc *DeploymentController) removeFinalizerFunc(obj runtime.Object, finaliz
     deployment.ObjectMeta.Finalizers = newFinalizers
     deployment, err := fdc.fedClient.Extensions().Deployments(deployment.Namespace).Update(deployment)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from deployment %s: %v", finalizer, deployment.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from deployment %s: %v", finalizers, deployment.Name, err)
     }
     return deployment, nil
 }
[file header missing]

@@ -255,7 +255,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
         func(client kubeclientset.Interface, obj pkgruntime.Object) error {
             ingress := obj.(*extensionsv1beta1.Ingress)
             glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
-            err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             return err
         })
@@ -316,14 +317,14 @@ func (ic *IngressController) hasFinalizerFunc(obj pkgruntime.Object, finalizer s
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a ingress.
-func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
     ingress := obj.(*extensionsv1beta1.Ingress)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range ingress.ObjectMeta.Finalizers {
-        if string(ingress.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, ingress.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, ingress.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -336,7 +337,7 @@ func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalize
     ingress.ObjectMeta.Finalizers = newFinalizers
     ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from ingress %s: %v", finalizer, ingress.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from ingress %s: %v", finalizers, ingress.Name, err)
     }
     return ingress, nil
 }
[file header missing]

@@ -169,7 +169,8 @@ func NewNamespaceController(client federationclientset.Interface, dynamicClientP
         },
         func(client kubeclientset.Interface, obj runtime.Object) error {
             namespace := obj.(*apiv1.Namespace)
-            err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             // IsNotFound error is fine since that means the object is deleted already.
             if errors.IsNotFound(err) {
                 return nil
@@ -210,14 +211,14 @@ func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer st
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a namespace.
-func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
     namespace := obj.(*apiv1.Namespace)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range namespace.ObjectMeta.Finalizers {
-        if string(namespace.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, namespace.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, namespace.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -230,7 +231,7 @@ func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer
     namespace.ObjectMeta.Finalizers = newFinalizers
     namespace, err := nc.federatedApiClient.Core().Namespaces().Update(namespace)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from namespace %s: %v", finalizer, namespace.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from namespace %s: %v", finalizers, namespace.Name, err)
     }
     return namespace, nil
 }
[file header missing]

@@ -209,7 +209,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
         },
         func(client kubeclientset.Interface, obj runtime.Object) error {
             rs := obj.(*extensionsv1.ReplicaSet)
-            err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             return err
         })
@@ -242,14 +243,14 @@ func (frsc *ReplicaSetController) hasFinalizerFunc(obj runtime.Object, finalizer
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a replicaset.
-func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
+func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finalizers []string) (runtime.Object, error) {
     replicaset := obj.(*extensionsv1.ReplicaSet)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range replicaset.ObjectMeta.Finalizers {
-        if string(replicaset.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, replicaset.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, replicaset.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -262,7 +263,7 @@ func (frsc *ReplicaSetController) removeFinalizerFunc(obj runtime.Object, finali
     replicaset.ObjectMeta.Finalizers = newFinalizers
     replicaset, err := frsc.fedClient.Extensions().ReplicaSets(replicaset.Namespace).Update(replicaset)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from replicaset %s: %v", finalizer, replicaset.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from replicaset %s: %v", finalizers, replicaset.Name, err)
     }
     return replicaset, nil
 }
[file header missing]

@@ -168,7 +168,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
         },
         func(client kubeclientset.Interface, obj pkgruntime.Object) error {
             secret := obj.(*apiv1.Secret)
-            err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{})
+            orphanDependents := false
+            err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
             return err
         })
@@ -201,14 +202,14 @@ func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object
     return false
 }

-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a secret.
-func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
     secret := obj.(*apiv1.Secret)
     newFinalizers := []string{}
     hasFinalizer := false
     for i := range secret.ObjectMeta.Finalizers {
-        if string(secret.ObjectMeta.Finalizers[i]) != finalizer {
+        if !deletionhelper.ContainsString(finalizers, secret.ObjectMeta.Finalizers[i]) {
             newFinalizers = append(newFinalizers, secret.ObjectMeta.Finalizers[i])
         } else {
             hasFinalizer = true
@@ -221,7 +222,7 @@ func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Obj
     secret.ObjectMeta.Finalizers = newFinalizers
     secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
     if err != nil {
-        return nil, fmt.Errorf("failed to remove finalizer %s from secret %s: %v", finalizer, secret.Name, err)
+        return nil, fmt.Errorf("failed to remove finalizers %v from secret %s: %v", finalizers, secret.Name, err)
     }
     return secret, nil
 }
@ -291,7 +291,8 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
|
|||
},
|
||||
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
|
||||
svc := obj.(*v1.Service)
|
||||
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
|
||||
orphanDependents := false
|
||||
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
|
||||
return err
|
||||
})
|
||||
|
||||
|
@@ -328,14 +329,14 @@ func (s *ServiceController) hasFinalizerFunc(obj pkgruntime.Object, finalizer st
 	return false
 }
 
-// Removes the finalizer from the given objects ObjectMeta.
+// Removes the finalizers from the given objects ObjectMeta.
 // Assumes that the given object is a service.
-func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizers []string) (pkgruntime.Object, error) {
 	service := obj.(*v1.Service)
 	newFinalizers := []string{}
 	hasFinalizer := false
 	for i := range service.ObjectMeta.Finalizers {
-		if string(service.ObjectMeta.Finalizers[i]) != finalizer {
+		if !deletionhelper.ContainsString(finalizers, service.ObjectMeta.Finalizers[i]) {
 			newFinalizers = append(newFinalizers, service.ObjectMeta.Finalizers[i])
 		} else {
 			hasFinalizer = true
@@ -348,7 +349,7 @@ func (s *ServiceController) removeFinalizerFunc(obj pkgruntime.Object, finalizer
 	service.ObjectMeta.Finalizers = newFinalizers
 	service, err := s.federationClient.Core().Services(service.Namespace).Update(service)
 	if err != nil {
-		return nil, fmt.Errorf("failed to remove finalizer %s from service %s: %v", finalizer, service.Name, err)
+		return nil, fmt.Errorf("failed to remove finalizers %v from service %s: %v", finalizers, service.Name, err)
 	}
 	return service, nil
 }
@@ -9,7 +9,10 @@ load(
 
 go_library(
     name = "go_default_library",
-    srcs = ["deletion_helper.go"],
+    srcs = [
+        "deletion_helper.go",
+        "util.go",
+    ],
     tags = ["automanaged"],
     deps = [
         "//federation/pkg/federation-controller/util:go_default_library",
@@ -45,7 +45,7 @@ const (
 )
 
 type HasFinalizerFunc func(runtime.Object, string) bool
-type RemoveFinalizerFunc func(runtime.Object, string) (runtime.Object, error)
+type RemoveFinalizerFunc func(runtime.Object, []string) (runtime.Object, error)
 type AddFinalizerFunc func(runtime.Object, []string) (runtime.Object, error)
 type ObjNameFunc func(runtime.Object) string
 
@@ -123,11 +123,8 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) (
 		// If the obj has FinalizerOrphan finalizer, then we need to orphan the
 		// corresponding objects in underlying clusters.
 		// Just remove both the finalizers in that case.
-		obj, err := dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters)
-		if err != nil {
-			return obj, err
-		}
-		return dh.removeFinalizerFunc(obj, metav1.FinalizerOrphanDependents)
+		finalizers := []string{FinalizerDeleteFromUnderlyingClusters, metav1.FinalizerOrphanDependents}
+		return dh.removeFinalizerFunc(obj, finalizers)
 	}
 
 	glog.V(2).Infof("Deleting obj %s from underlying clusters", objName)
@@ -183,5 +180,5 @@ func (dh *DeletionHelper) HandleObjectInUnderlyingClusters(obj runtime.Object) (
 	}
 
 	// All done. Just remove the finalizer.
-	return dh.removeFinalizerFunc(obj, FinalizerDeleteFromUnderlyingClusters)
+	return dh.removeFinalizerFunc(obj, []string{FinalizerDeleteFromUnderlyingClusters})
 }
28 vendor/k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper/util.go generated vendored Normal file
@@ -0,0 +1,28 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deletionhelper
+
+// ContainsString returns true if the given string slice contains the given string.
+// Returns false otherwise.
+func ContainsString(arr []string, s string) bool {
+	for i := range arr {
+		if arr[i] == s {
+			return true
+		}
+	}
+	return false
+}
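Editor's note: the new ContainsString helper above is what lets the rewritten removeFinalizerFunc implementations drop several finalizers in one Update call. A minimal, self-contained Go sketch of that filtering step; the finalizer strings here are illustrative values, not taken from the commit:

package main

import "fmt"

// ContainsString returns true if the given string slice contains the given string.
func ContainsString(arr []string, s string) bool {
	for i := range arr {
		if arr[i] == s {
			return true
		}
	}
	return false
}

func main() {
	// Finalizers currently set on an object (illustrative values).
	current := []string{
		"federation.kubernetes.io/delete-from-underlying-clusters",
		"orphan",
		"example.com/other-finalizer",
	}
	// Both federation finalizers are removed in a single pass, mirroring
	// HandleObjectInUnderlyingClusters in the diff above.
	toRemove := []string{
		"federation.kubernetes.io/delete-from-underlying-clusters",
		"orphan",
	}

	newFinalizers := []string{}
	for _, f := range current {
		if !ContainsString(toRemove, f) {
			newFinalizers = append(newFinalizers, f)
		}
	}
	fmt.Println(newFinalizers) // [example.com/other-finalizer]
}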
2 vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init.go generated vendored
@@ -922,7 +922,7 @@ func marshallOverrides(overrideArgString string) (map[string]string, error) {
 	argsMap := make(map[string]string)
 	overrideArgs := strings.Split(overrideArgString, ",")
 	for _, overrideArg := range overrideArgs {
-		splitArg := strings.Split(overrideArg, "=")
+		splitArg := strings.SplitN(overrideArg, "=", 2)
 		if len(splitArg) != 2 {
 			return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
 		}
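Editor's note: the Split to SplitN change above is what the updated test below exercises. With strings.SplitN(s, "=", 2) an override value may itself contain "=" characters, whereas strings.Split rejected it via the len != 2 check. A quick standalone comparison:

package main

import (
	"fmt"
	"strings"
)

func main() {
	arg := "multiple-equalto-char=first-key=1"

	// Old behaviour: three fields, so the len(splitArg) != 2 check failed.
	fmt.Println(len(strings.Split(arg, "="))) // 3
	// New behaviour: at most two fields; the value keeps its embedded '='.
	fmt.Println(strings.SplitN(arg, "=", 2)) // [multiple-equalto-char first-key=1]
}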
6 vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go generated vendored
@@ -322,8 +322,10 @@ func TestMarshallAndMergeOverrides(t *testing.T) {
 			expectedErr: "wrong format for override arg: wrong-format-arg",
 		},
 		{
-			overrideParams: "wrong-format-arg=override=wrong-format-arg=override",
-			expectedErr:    "wrong format for override arg: wrong-format-arg=override=wrong-format-arg=override",
+			// TODO: Multiple arg values separated by , are not supported yet
+			overrideParams: "multiple-equalto-char=first-key=1",
+			expectedSet:    sets.NewString("arg2=val2", "arg1=val1", "multiple-equalto-char=first-key=1"),
+			expectedErr:    "",
 		},
 		{
 			overrideParams: "=wrong-format-only-value",
3 vendor/k8s.io/kubernetes/federation/pkg/kubefed/unjoin.go generated vendored
@@ -194,7 +194,8 @@ func deleteConfigMapFromCluster(hostClientset internalclientset.Interface, secre
 // deleteSecret deletes the secret with the given name from the host
 // cluster.
 func deleteSecret(clientset internalclientset.Interface, name, namespace string) error {
-	return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{})
+	orphanDependents := false
+	return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
 }
 
 // isNotFound checks if the given error is a NotFound status error.
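Editor's note: the same OrphanDependents change appears in the secret, service, and unjoin paths above. The field is a *bool so that "unset" (server default) stays distinct from an explicit false. A small sketch of that tri-state pattern using a stand-in struct, not the real metav1 type:

package main

import "fmt"

// DeleteOptions mirrors only the shape of metav1.DeleteOptions for illustration:
// a nil OrphanDependents means "use the server default", while an explicit
// pointer-to-false asks the server to also delete dependent objects.
type DeleteOptions struct {
	OrphanDependents *bool
}

func main() {
	orphanDependents := false
	opts := DeleteOptions{OrphanDependents: &orphanDependents}
	fmt.Printf("orphan dependents: %v\n", *opts.OrphanDependents) // orphan dependents: false
}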
4 vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh generated vendored
@@ -36,10 +36,14 @@ function run_kube_apiserver() {
   # Admission Controllers to invoke prior to persisting objects in cluster
   ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota"
 
+  # Include RBAC (to exercise bootstrapping), and AlwaysAllow to allow all actions
+  AUTHORIZATION_MODE="RBAC,AlwaysAllow"
+
   "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
     --address="127.0.0.1" \
-    --public-address-override="127.0.0.1" \
     --port="${API_PORT}" \
+    --authorization-mode="${AUTHORIZATION_MODE}" \
     --admission-control="${ADMISSION_CONTROL}" \
     --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
+    --public-address-override="127.0.0.1" \
16 vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go generated vendored
@@ -276,10 +276,10 @@ const (
 	AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
 )
 
-// Tries to add a toleration to annotations list. Returns true if something was updated
-// false otherwise.
-func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
-	podTolerations := pod.Spec.Tolerations
+// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
+// Returns true if something was updated, false otherwise.
+func AddOrUpdateTolerationInPodSpec(spec *PodSpec, toleration *Toleration) (bool, error) {
+	podTolerations := spec.Tolerations
 
 	var newTolerations []Toleration
 	updated := false
@@ -300,10 +300,16 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error)
 		newTolerations = append(newTolerations, *toleration)
 	}
 
-	pod.Spec.Tolerations = newTolerations
+	spec.Tolerations = newTolerations
 	return true, nil
 }
 
+// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
+// Returns true if something was updated, false otherwise.
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
+	return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
+}
+
 // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
 // if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
 // TODO: uniqueness check for tolerations in api validations.
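Editor's note: the refactor above extracts the spec-level logic so callers that only hold a PodSpec (such as the DaemonSet util change later in this commit) can reuse it, while the pod-level function becomes a thin wrapper. A simplified sketch of the pattern with stand-in types and a deliberately simplified match rule; the real code compares tolerations field by field and returns (bool, error):

package main

import "fmt"

// Stand-in types; the real ones live in k8s.io/kubernetes/pkg/api/v1.
type Toleration struct{ Key, Operator, Effect string }
type PodSpec struct{ Tolerations []Toleration }
type Pod struct{ Spec PodSpec }

// Spec-level helper: usable wherever only a PodSpec is available.
func AddOrUpdateTolerationInPodSpec(spec *PodSpec, t *Toleration) bool {
	for _, existing := range spec.Tolerations {
		if existing == *t {
			return false // already present, nothing to update
		}
	}
	spec.Tolerations = append(spec.Tolerations, *t)
	return true
}

// Pod-level function is now just a wrapper over the spec-level one.
func AddOrUpdateTolerationInPod(pod *Pod, t *Toleration) bool {
	return AddOrUpdateTolerationInPodSpec(&pod.Spec, t)
}

func main() {
	pod := &Pod{}
	updated := AddOrUpdateTolerationInPod(pod, &Toleration{Key: "notReady", Operator: "Exists", Effect: "NoExecute"})
	fmt.Println(updated, len(pod.Spec.Tolerations)) // true 1
}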
1 vendor/k8s.io/kubernetes/pkg/apis/batch/types.go generated vendored
@@ -233,6 +233,7 @@ type CronJobSpec struct {
 	StartingDeadlineSeconds *int64
 
 	// ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+	// Defaults to Allow.
 	// +optional
 	ConcurrencyPolicy ConcurrencyPolicy
 
2 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go generated vendored
@@ -41,7 +41,7 @@ func TestSetDefaultCronJob(t *testing.T) {
 				},
 			},
 		},
-		"nothing should be defaulted": {
+		"set fields should not be defaulted": {
 			original: &CronJob{
 				Spec: CronJobSpec{
 					ConcurrencyPolicy: ForbidConcurrent,
1 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto generated vendored
@@ -72,6 +72,7 @@ message CronJobSpec {
   optional int64 startingDeadlineSeconds = 2;
 
   // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+  // Defaults to Allow.
   // +optional
   optional string concurrencyPolicy = 3;
 
1 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go generated vendored
@@ -94,6 +94,7 @@ type CronJobSpec struct {
 	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
 
 	// ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+	// Defaults to Allow.
 	// +optional
 	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
 
2 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go generated vendored
@@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{
 	"": "CronJobSpec describes how the job execution will look like and when it will actually run.",
 	"schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
 	"startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
-	"concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
+	"concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
 	"suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
 	"jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.",
 	"successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
4 vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go generated vendored
@@ -20,8 +20,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 // IsDefaultStorageClassAnnotation represents a StorageClass annotation that
 // marks a class as the default StorageClass
-//TODO: Update IsDefaultStorageClassannotation and remove Beta when no longer used
-const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
+//TODO: remove Beta when no longer used
+const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
+const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
 
 // IsDefaultAnnotationText returns a pretty Yes/No String if
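Editor's note: with both the GA and the beta annotation keys defined above, callers typically have to honour either one during the deprecation window. A hedged sketch of such a lookup over a plain annotations map; the helper name isDefaultClass is hypothetical and not part of this diff:

package main

import "fmt"

const (
	IsDefaultStorageClassAnnotation     = "storageclass.kubernetes.io/is-default-class"
	BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
)

// isDefaultClass is a hypothetical helper: it accepts the GA key and falls
// back to the beta key while the latter is still in use.
func isDefaultClass(annotations map[string]string) bool {
	if annotations[IsDefaultStorageClassAnnotation] == "true" {
		return true
	}
	return annotations[BetaIsDefaultStorageClassAnnotation] == "true"
}

func main() {
	legacy := map[string]string{BetaIsDefaultStorageClassAnnotation: "true"}
	fmt.Println(isDefaultClass(legacy)) // true
}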
9 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go generated vendored
@@ -392,6 +392,10 @@ type CloudConfig struct {
 	// on a different aws account, on a different cloud provider or on-premise.
 	// If the flag is set also the KubernetesClusterTag must be provided
 	VPC string
+	// SubnetID enables using a specific subnet to use for ELB's
+	SubnetID string
+	// RouteTableID enables using a specific RouteTable
+	RouteTableID string
 
 	// KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
 	KubernetesClusterTag string
@@ -817,13 +821,14 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
 		deviceAllocators: make(map[types.NodeName]DeviceAllocator),
 	}
 
-	if cfg.Global.VPC != "" && cfg.Global.KubernetesClusterTag != "" {
+	if cfg.Global.VPC != "" && cfg.Global.SubnetID != "" && (cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "") {
 		// When the master is running on a different AWS account, cloud provider or on-premise
 		// build up a dummy instance and use the VPC from the nodes account
-		glog.Info("Master is configured to run on a AWS account, different cloud provider or on-premise")
+		glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premise")
 		awsCloud.selfAWSInstance = &awsInstance{
 			nodeName: "master-dummy",
 			vpcID:    cfg.Global.VPC,
+			subnetID: cfg.Global.SubnetID,
 		}
 		awsCloud.vpcID = cfg.Global.VPC
 	} else {
30 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go generated vendored
@@ -29,17 +29,27 @@ func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
 	// This should be unnecessary (we already filter on TagNameKubernetesCluster,
 	// and something is broken if cluster name doesn't match, but anyway...
 	// TODO: All clouds should be cluster-aware by default
-	request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)}
-
-	response, err := c.ec2.DescribeRouteTables(request)
-	if err != nil {
-		return nil, err
-	}
-
 	var tables []*ec2.RouteTable
-	for _, table := range response {
-		if c.tagging.hasClusterTag(table.Tags) {
-			tables = append(tables, table)
+
+	if c.cfg.Global.RouteTableID != "" {
+		request := &ec2.DescribeRouteTablesInput{Filters: []*ec2.Filter{newEc2Filter("route-table-id", c.cfg.Global.RouteTableID)}}
+		response, err := c.ec2.DescribeRouteTables(request)
+		if err != nil {
+			return nil, err
+		}
+
+		tables = response
+	} else {
+		request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)}
+		response, err := c.ec2.DescribeRouteTables(request)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, table := range response {
+			if c.tagging.hasClusterTag(table.Tags) {
+				tables = append(tables, table)
+			}
+		}
 	}
 
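Editor's note: the findRouteTable change above follows a common pattern, prefer an explicitly configured resource ID and only fall back to tag-based discovery when none is given. A stripped-down sketch of that control flow; the types and lookups here are stand-ins for the EC2 API, not the vendored code:

package main

import "fmt"

type routeTable struct{ id, clusterTag string }

// findRouteTable mirrors the shape of the change: an explicit RouteTableID
// short-circuits discovery; otherwise tables are filtered by cluster tag.
func findRouteTable(configuredID, clusterTag string, all []routeTable) []routeTable {
	var out []routeTable
	if configuredID != "" {
		for _, t := range all {
			if t.id == configuredID {
				out = append(out, t)
			}
		}
		return out
	}
	for _, t := range all {
		if t.clusterTag == clusterTag {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	all := []routeTable{{"rtb-1", "clusterA"}, {"rtb-2", "clusterB"}}
	fmt.Println(findRouteTable("rtb-2", "clusterA", all)) // [{rtb-2 clusterB}]
	fmt.Println(findRouteTable("", "clusterA", all))      // [{rtb-1 clusterA}]
}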
1 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD generated vendored
@@ -28,6 +28,7 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/api/v1/service:go_default_library",
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/version:go_default_library",
         "//pkg/volume:go_default_library",
         "//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
         "//vendor:github.com/Azure/azure-sdk-for-go/arm/network",
24 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go generated vendored
@@ -17,14 +17,17 @@ limitations under the License.
 package azure
 
 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 
 	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/version"
 
 	"github.com/Azure/azure-sdk-for-go/arm/compute"
 	"github.com/Azure/azure-sdk-for-go/arm/network"
 	"github.com/Azure/azure-sdk-for-go/arm/storage"
+	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/ghodss/yaml"
 	"time"
@@ -125,38 +128,54 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 	az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID)
 	az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.SubnetsClient.Authorizer = servicePrincipalToken
+	az.SubnetsClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.SubnetsClient.Client)
 
 	az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
 	az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.RouteTablesClient.Authorizer = servicePrincipalToken
+	az.RouteTablesClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.RouteTablesClient.Client)
 
 	az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
 	az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.RoutesClient.Authorizer = servicePrincipalToken
+	az.RoutesClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.RoutesClient.Client)
 
 	az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID)
 	az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.InterfacesClient.Authorizer = servicePrincipalToken
+	az.InterfacesClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.InterfacesClient.Client)
 
 	az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID)
 	az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.LoadBalancerClient.Authorizer = servicePrincipalToken
+	az.LoadBalancerClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.LoadBalancerClient.Client)
 
 	az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID)
 	az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.VirtualMachinesClient.Authorizer = servicePrincipalToken
+	az.VirtualMachinesClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.VirtualMachinesClient.Client)
 
 	az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID)
 	az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.PublicIPAddressesClient.Authorizer = servicePrincipalToken
+	az.PublicIPAddressesClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.PublicIPAddressesClient.Client)
 
 	az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID)
 	az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
 	az.SecurityGroupsClient.Authorizer = servicePrincipalToken
+	az.SecurityGroupsClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&az.SecurityGroupsClient.Client)
 
 	az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
 	az.StorageAccountClient.Authorizer = servicePrincipalToken
 
 	return &az, nil
 }
 
@@ -194,3 +213,8 @@ func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []stri
 func (az *Cloud) ProviderName() string {
 	return CloudProviderName
 }
+
+func configureUserAgent(client *autorest.Client) {
+	k8sVersion := version.Get().GitVersion
+	client.UserAgent = fmt.Sprintf("%s; %s", client.UserAgent, k8sVersion)
+}
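Editor's note: configureUserAgent above appends the Kubernetes git version to whatever User-Agent the autorest client already carries, so Azure request logs can attribute traffic to a cluster version. A standalone sketch of the same string-building step; the version value here is illustrative:

package main

import "fmt"

func configureUserAgent(userAgent, k8sVersion string) string {
	// Same formatting as the new helper: "<existing agent>; <git version>".
	return fmt.Sprintf("%s; %s", userAgent, k8sVersion)
}

func main() {
	fmt.Println(configureUserAgent("Azure-SDK-for-Go/7.0.1", "v1.6.4"))
	// Output: Azure-SDK-for-Go/7.0.1; v1.6.4
}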
654 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go generated vendored
@@ -24,9 +24,11 @@ import (
 	"net/url"
 	"path"
 	"path/filepath"
+	"regexp"
 	"runtime"
 	"strings"
 	"sync"
+	"time"
 
 	"gopkg.in/gcfg.v1"
 
@@ -49,23 +51,33 @@ import (
 )
 
 const (
-	ProviderName              = "vsphere"
-	ActivePowerState          = "poweredOn"
-	SCSIControllerType        = "scsi"
-	LSILogicControllerType    = "lsiLogic"
-	BusLogicControllerType    = "busLogic"
-	PVSCSIControllerType      = "pvscsi"
-	LSILogicSASControllerType = "lsiLogic-sas"
-	SCSIControllerLimit       = 4
-	SCSIControllerDeviceLimit = 15
-	SCSIDeviceSlots           = 16
-	SCSIReservedSlot          = 7
-	ThinDiskType              = "thin"
-	PreallocatedDiskType      = "preallocated"
-	EagerZeroedThickDiskType  = "eagerZeroedThick"
-	ZeroedThickDiskType       = "zeroedThick"
-	VolDir                    = "kubevols"
-	RoundTripperDefaultCount  = 3
+	ProviderName                     = "vsphere"
+	ActivePowerState                 = "poweredOn"
+	SCSIControllerType               = "scsi"
+	LSILogicControllerType           = "lsiLogic"
+	BusLogicControllerType           = "busLogic"
+	PVSCSIControllerType             = "pvscsi"
+	LSILogicSASControllerType        = "lsiLogic-sas"
+	SCSIControllerLimit              = 4
+	SCSIControllerDeviceLimit        = 15
+	SCSIDeviceSlots                  = 16
+	SCSIReservedSlot                 = 7
+	ThinDiskType                     = "thin"
+	PreallocatedDiskType             = "preallocated"
+	EagerZeroedThickDiskType         = "eagerZeroedThick"
+	ZeroedThickDiskType              = "zeroedThick"
+	VolDir                           = "kubevols"
+	RoundTripperDefaultCount         = 3
+	DummyVMPrefixName                = "vsphere-k8s"
+	VSANDatastoreType                = "vsan"
+	MAC_OUI_VC                       = "00:50:56"
+	MAC_OUI_ESX                      = "00:0c:29"
+	DiskNotFoundErrMsg               = "No vSphere disk ID found"
+	NoDiskUUIDFoundErrMsg            = "No disk UUID found"
+	NoDevicesFoundErrMsg             = "No devices found"
+	NonSupportedControllerTypeErrMsg = "Disk is attached to non-supported controller type"
+	FileAlreadyExistErrMsg           = "File requested already exist"
+	CleanUpDummyVMRoutine_Interval   = 5
 )
 
 // Controller types that are currently supported for hot attach of disks
@@ -85,14 +97,17 @@ var diskFormatValidType = map[string]string{
 }
 
 var DiskformatValidOptions = generateDiskFormatValidOptions()
+var cleanUpRoutineInitialized = false
 
-var ErrNoDiskUUIDFound = errors.New("No disk UUID found")
-var ErrNoDiskIDFound = errors.New("No vSphere disk ID found")
-var ErrNoDevicesFound = errors.New("No devices found")
-var ErrNonSupportedControllerType = errors.New("Disk is attached to non-supported controller type")
-var ErrFileAlreadyExist = errors.New("File requested already exist")
+var ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg)
+var ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg)
+var ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg)
+var ErrNonSupportedControllerType = errors.New(NonSupportedControllerTypeErrMsg)
+var ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg)
 
 var clientLock sync.Mutex
+var cleanUpRoutineInitLock sync.Mutex
+var cleanUpDummyVMLock sync.RWMutex
 
 // VSphere is an implementation of cloud provider Interface for VSphere.
 type VSphere struct {
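Editor's note: the sentinel errors above now share their text with named message constants because later code (CreateVolume in this same diff) matches errors by comparing err.Error() against DiskNotFoundErrMsg. A small sketch of why that works where identity comparison does not; the values are copied from the diff, the rest is illustrative:

package main

import (
	"errors"
	"fmt"
)

const DiskNotFoundErrMsg = "No vSphere disk ID found"

var ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg)

func main() {
	err := errors.New(DiskNotFoundErrMsg) // a distinct error value with the same text
	fmt.Println(err == ErrNoDiskIDFound)           // false: identity comparison fails
	fmt.Println(err.Error() == DiskNotFoundErrMsg) // true: message comparison still works
}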
@@ -166,11 +181,12 @@ type Volumes interface {
 
 // VolumeOptions specifies capacity, tags, name and diskFormat for a volume.
 type VolumeOptions struct {
-	CapacityKB int
-	Tags       map[string]string
-	Name       string
-	DiskFormat string
-	Datastore  string
+	CapacityKB         int
+	Tags               map[string]string
+	Name               string
+	DiskFormat         string
+	Datastore          string
+	StorageProfileData string
 }
 
 // Generates Valid Options for Diskformat
@@ -687,6 +703,8 @@ func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualD
 
 // Attaches given virtual disk volume to the compute running kubelet.
 func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
+	var newSCSIController types.BaseVirtualDevice
+
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -722,50 +740,24 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
 
 	var diskControllerType = vs.cfg.Disk.SCSIControllerType
 	// find SCSI controller of particular type from VM devices
-	allSCSIControllers := getSCSIControllers(vmDevices)
 	scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType)
 	scsiController := getAvailableSCSIController(scsiControllersOfRequiredType)
-
-	var newSCSICreated = false
-	var newSCSIController types.BaseVirtualDevice
-
-	// creating a scsi controller as there is none found of controller type defined
+	newSCSICreated := false
 	if scsiController == nil {
-		if len(allSCSIControllers) >= SCSIControllerLimit {
-			// we reached the maximum number of controllers we can attach
-			return "", "", fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
-		}
-		glog.V(1).Infof("Creating a SCSI controller of %v type", diskControllerType)
-		newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
+		newSCSIController, err = createAndAttachSCSIControllerToVM(ctx, vm, diskControllerType)
 		if err != nil {
-			k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err))
-			return "", "", err
-		}
-		configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
-		hotAndRemove := true
-		configNewSCSIController.HotAddRemove = &hotAndRemove
-		configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
-
-		// add the scsi controller to virtual machine
-		err = vm.AddDevice(context.TODO(), newSCSIController)
-		if err != nil {
-			glog.V(1).Infof("cannot add SCSI controller to vm - %v", err)
-			// attempt clean up of scsi controller
-			if vmDevices, err := vm.Device(ctx); err == nil {
-				cleanUpController(ctx, newSCSIController, vmDevices, vm)
-			}
+			glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.Name(), err)
 			return "", "", err
 		}
 
 		// verify scsi controller in virtual machine
-		vmDevices, err = vm.Device(ctx)
+		vmDevices, err := vm.Device(ctx)
 		if err != nil {
 			// cannot cleanup if there is no device list
 			return "", "", err
 		}
 
 	// Get VM device list
-	_, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
+	_, vmDevices, _, err = getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
 	if err != nil {
 		glog.Errorf("cannot get vmDevices for VM err=%s", err)
 		return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err)
@@ -798,7 +790,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
 		glog.Errorf("Failed while searching for datastore %+q. err %s", datastorePathObj.Datastore, err)
 		return "", "", err
 	}
-
+	vmDiskPath = removeClusterFromVDiskPath(vmDiskPath)
 	disk := vmDevices.CreateDisk(scsiController, ds.Reference(), vmDiskPath)
 	unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
 	if err != nil {
@@ -1045,6 +1037,7 @@ func checkDiskAttached(volPath string, vmdevices object.VirtualDeviceList, dc *o
 
 // Returns the object key that denotes the controller object to which vmdk is attached.
 func getVirtualDiskControllerKey(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (int32, error) {
+	volPath = removeClusterFromVDiskPath(volPath)
 	volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client)
 
 	if err != nil {
@@ -1175,7 +1168,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
 	if err != nil {
 		return err
 	}
-
+	volPath = removeClusterFromVDiskPath(volPath)
 	diskID, err := getVirtualDiskID(volPath, vmDevices, dc, vs.client)
 	if err != nil {
 		glog.Warningf("disk ID not found for %v ", volPath)
@@ -1200,8 +1193,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
 // CreateVolume creates a volume of given size (in KiB).
 func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) {
 
-	var diskFormat string
 	var datastore string
+	var destVolPath string
 
 	// Default datastore is the datastore in the vSphere config file that is used initialize vSphere cloud provider.
 	if volumeOptions.Datastore == "" {
@@ -1220,8 +1213,6 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
 			" Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions)
 	}
 
-	diskFormat = diskFormatValidType[volumeOptions.DiskFormat]
-
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -1246,43 +1237,105 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
 		return "", err
 	}
 
-	// vmdks will be created inside kubevols directory
-	kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
-	err = makeDirectoryInDatastore(vs.client, dc, kubeVolsPath, false)
-	if err != nil && err != ErrFileAlreadyExist {
-		glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
-		return "", err
-	}
-	glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
+	// Create a disk with the VSAN storage capabilities specified in the volumeOptions.StorageProfileData.
+	// This is achieved by following steps:
+	// 1. Create dummy VM if not already present.
+	// 2. Add a new disk to the VM by performing VM reconfigure.
+	// 3. Detach the new disk from the dummy VM.
+	// 4. Delete the dummy VM.
+	if volumeOptions.StorageProfileData != "" {
+		// Check if the datastore is VSAN if any capability requirements are specified.
+		// VSphere cloud provider now only supports VSAN capabilities requirements
+		ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds)
+		if err != nil {
+			return "", fmt.Errorf("Failed while determining whether the datastore: %q"+
+				" is VSAN or not.", datastore)
+		}
+		if !ok {
+			return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
+				" The policy parameters will work only with VSAN Datastore."+
+				" So, please specify a valid VSAN datastore in Storage class definition.", datastore)
+		}
 
-	vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
+		// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
+		cleanUpDummyVMLock.RLock()
+		defer cleanUpDummyVMLock.RUnlock()
 
-	// Create a virtual disk manager
-	virtualDiskManager := object.NewVirtualDiskManager(vs.client.Client)
+		// Create a new background routine that will delete any dummy VM's that are left stale.
+		// This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime.
+		cleanUpRoutineInitLock.Lock()
+		if !cleanUpRoutineInitialized {
+			go vs.cleanUpDummyVMs(DummyVMPrefixName)
+			cleanUpRoutineInitialized = true
+		}
+		cleanUpRoutineInitLock.Unlock()
 
-	// Create specification for new virtual disk
-	vmDiskSpec := &types.FileBackedVirtualDiskSpec{
-		VirtualDiskSpec: types.VirtualDiskSpec{
-			AdapterType: LSILogicControllerType,
-			DiskType:    diskFormat,
-		},
-		CapacityKb: int64(volumeOptions.CapacityKB),
+		// Check if the VM exists in kubernetes cluster folder.
+		// The kubernetes cluster folder - vs.cfg.Global.WorkingDir is where all the nodes in the kubernetes cluster are created.
+		dummyVMFullName := DummyVMPrefixName + "-" + volumeOptions.Name
+		vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName
+		dummyVM, err := f.VirtualMachine(ctx, vmRegex)
+		if err != nil {
+			// 1. Create a dummy VM and return the VM reference.
+			dummyVM, err = vs.createDummyVM(ctx, dc, ds, dummyVMFullName)
+			if err != nil {
+				return "", err
+			}
+		}
+
+		// 2. Reconfigure the VM to attach the disk with the VSAN policy configured.
+		vmDiskPath, err := vs.createVirtualDiskWithPolicy(ctx, dc, ds, dummyVM, volumeOptions)
+		fileAlreadyExist := false
+		if err != nil {
+			vmDiskPath = filepath.Clean(ds.Path(VolDir)) + "/" + volumeOptions.Name + ".vmdk"
+			errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", vmDiskPath)
+			if errorMessage == err.Error() {
+				//Skip error and continue to detach the disk as the disk was already created on the datastore.
+				fileAlreadyExist = true
+				glog.V(1).Infof("File: %v already exists", vmDiskPath)
+			} else {
+				glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
+				return "", err
+			}
+		}
+
+		dummyVMNodeName := vmNameToNodeName(dummyVMFullName)
+		// 3. Detach the disk from the dummy VM.
+		err = vs.DetachDisk(vmDiskPath, dummyVMNodeName)
+		if err != nil {
+			if DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
+				// Skip error if disk was already detached from the dummy VM but still present on the datastore.
+				glog.V(1).Infof("File: %v is already detached", vmDiskPath)
+			} else {
+				glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmDiskPath, dummyVMFullName, err)
+				return "", fmt.Errorf("Failed to create the volume: %q with err: %+v", volumeOptions.Name, err)
+			}
+		}
+
+		// 4. Delete the dummy VM
+		err = deleteVM(ctx, dummyVM)
+		if err != nil {
+			return "", fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
+		}
+		destVolPath = vmDiskPath
+	} else {
+		// Create a virtual disk directly if no VSAN storage capabilities are specified by the user.
+		destVolPath, err = createVirtualDisk(ctx, vs.client, dc, ds, volumeOptions)
+		if err != nil {
+			return "", fmt.Errorf("Failed to create the virtual disk having name: %+q with err: %+v", destVolPath, err)
+		}
 	}
 
-	// Create virtual disk
-	task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec)
-	if err != nil {
-		return "", err
+	if filepath.Base(datastore) != datastore {
+		// If Datastore is within cluster, add cluster path to the destVolPath
+		destVolPath = strings.Replace(destVolPath, filepath.Base(datastore), datastore, 1)
 	}
-	err = task.Wait(ctx)
-	if err != nil {
-		return "", err
-	}
-
-	return vmDiskPath, nil
+	glog.V(1).Infof("VM Disk path is %+q", destVolPath)
+	return destVolPath, nil
 }
 
 // DeleteVolume deletes a volume given volume name.
 // Also, deletes the folder where the volume resides.
 func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
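Editor's note: CreateVolume above starts the dummy-VM cleanup goroutine at most once, guarding a package-level boolean with a mutex. A deterministic standalone sketch of that init-once pattern; the real code launches the routine with go, here start() runs synchronously so the demo's output is stable. sync.Once would express the same idea:

package main

import (
	"fmt"
	"sync"
)

var (
	initialized bool
	initLock    sync.Mutex
)

// startOnce mirrors the guarded-boolean pattern from CreateVolume: the
// cleanup routine is started at most once across concurrent callers.
func startOnce(start func()) {
	initLock.Lock()
	defer initLock.Unlock()
	if !initialized {
		start()
		initialized = true
	}
}

func main() {
	starts := 0
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			startOnce(func() { starts++ }) // starts is only written under initLock
		}()
	}
	wg.Wait()
	fmt.Println("started:", starts) // started: 1
}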
@@ -1308,7 +1361,24 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
 	if filepath.Ext(vmDiskPath) != ".vmdk" {
 		vmDiskPath += ".vmdk"
 	}
+
+	// Get the vmDisk Name
+	diskNameWithExt := path.Base(vmDiskPath)
+	diskName := strings.TrimSuffix(diskNameWithExt, filepath.Ext(diskNameWithExt))
+
+	// Search for the dummyVM if present and delete it.
+	dummyVMFullName := DummyVMPrefixName + "-" + diskName
+	vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName
+	dummyVM, err := f.VirtualMachine(ctx, vmRegex)
+	if err == nil {
+		err = deleteVM(ctx, dummyVM)
+		if err != nil {
+			return fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
+		}
+	}
+
 	// Delete virtual disk
+	vmDiskPath = removeClusterFromVDiskPath(vmDiskPath)
 	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
 	if err != nil {
 		return err
@@ -1356,6 +1426,341 @@ func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bo
 	return false, nil
 }
 
+// A background routine which will be responsible for deleting stale dummy VM's.
+func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
+	// Create context
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	for {
+		time.Sleep(CleanUpDummyVMRoutine_Interval * time.Minute)
+		// Ensure client is logged in and session is valid
+		err := vSphereLogin(ctx, vs)
+		if err != nil {
+			glog.V(4).Infof("[cleanUpDummyVMs] Unable to login to vSphere with err: %+v", err)
+			continue
+		}
+
+		// Create a new finder
+		f := find.NewFinder(vs.client.Client, true)
+
+		// Fetch and set data center
+		dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
+		if err != nil {
+			glog.V(4).Infof("[cleanUpDummyVMs] Unable to fetch the datacenter: %q with err: %+v", vs.cfg.Global.Datacenter, err)
+			continue
+		}
+		f.SetDatacenter(dc)
+
+		// Get the folder reference for global working directory where the dummy VM needs to be created.
+		vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
+		if err != nil {
+			glog.V(4).Infof("[cleanUpDummyVMs] Unable to get the kubernetes folder: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
+			continue
+		}
+
+		// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
+		cleanUpDummyVMLock.Lock()
+		dummyVMRefList, err := getDummyVMList(ctx, vs.client, vmFolder, dummyVMPrefix)
+		if err != nil {
+			glog.V(4).Infof("[cleanUpDummyVMs] Unable to get dummy VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
+			cleanUpDummyVMLock.Unlock()
+			continue
+		}
+		for _, dummyVMRef := range dummyVMRefList {
+			err = deleteVM(ctx, dummyVMRef)
+			if err != nil {
+				glog.V(4).Infof("[cleanUpDummyVMs] Unable to delete dummy VM: %q with err: %+v", dummyVMRef.Name(), err)
+				continue
+			}
+		}
+		cleanUpDummyVMLock.Unlock()
+	}
+}
+
+// Get the dummy VM list from the kubernetes working directory.
+func getDummyVMList(ctx context.Context, c *govmomi.Client, vmFolder *object.Folder, dummyVMPrefix string) ([]*object.VirtualMachine, error) {
+	vmFolders, err := vmFolder.Children(ctx)
+	if err != nil {
+		glog.V(4).Infof("Unable to retrieve the virtual machines from the kubernetes cluster: %+v", vmFolder)
+		return nil, err
+	}
+
+	var dummyVMRefList []*object.VirtualMachine
+	pc := property.DefaultCollector(c.Client)
+	for _, vmFolder := range vmFolders {
+		if vmFolder.Reference().Type == "VirtualMachine" {
+			var vmRefs []types.ManagedObjectReference
+			var vmMorefs []mo.VirtualMachine
+			vmRefs = append(vmRefs, vmFolder.Reference())
+			err = pc.Retrieve(ctx, vmRefs, []string{"name"}, &vmMorefs)
+			if err != nil {
+				return nil, err
+			}
+			if strings.HasPrefix(vmMorefs[0].Name, dummyVMPrefix) {
+				dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(c.Client, vmRefs[0]))
+			}
+		}
+	}
+	return dummyVMRefList, nil
+}
+
+func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, vmName string) (*object.VirtualMachine, error) {
+	// Create a virtual machine config spec with 1 SCSI adapter.
+	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
+		Name: vmName,
+		Files: &types.VirtualMachineFileInfo{
+			VmPathName: "[" + datastore.Name() + "]",
+		},
+		NumCPUs:  1,
+		MemoryMB: 4,
+		DeviceChange: []types.BaseVirtualDeviceConfigSpec{
+			&types.VirtualDeviceConfigSpec{
+				Operation: types.VirtualDeviceConfigSpecOperationAdd,
+				Device: &types.ParaVirtualSCSIController{
+					VirtualSCSIController: types.VirtualSCSIController{
+						SharedBus: types.VirtualSCSISharingNoSharing,
+						VirtualController: types.VirtualController{
+							BusNumber: 0,
+							VirtualDevice: types.VirtualDevice{
+								Key: 1000,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// Get the resource pool for current node. This is where dummy VM will be created.
+	resourcePool, err := vs.getCurrentNodeResourcePool(ctx, datacenter)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the folder reference for global working directory where the dummy VM needs to be created.
+	vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err)
+	}
+
+	task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	vmRef := dummyVMTaskInfo.Result.(object.Reference)
+	dummyVM := object.NewVirtualMachine(vs.client.Client, vmRef.Reference())
+	return dummyVM, nil
+}
+
+func (vs *VSphere) getCurrentNodeResourcePool(ctx context.Context, datacenter *object.Datacenter) (*object.ResourcePool, error) {
+	// Create a new finder
+	f := find.NewFinder(vs.client.Client, true)
+	f.SetDatacenter(datacenter)
+
+	vmRegex := vs.cfg.Global.WorkingDir + vs.localInstanceID
+	currentVM, err := f.VirtualMachine(ctx, vmRegex)
+	if err != nil {
+		return nil, err
+	}
+
+	currentVMHost, err := currentVM.HostSystem(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the resource pool for the current node.
+	// We create the dummy VM in the same resource pool as current node.
+	resourcePool, err := currentVMHost.ResourcePool(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return resourcePool, nil
+}
+
+// Creates a virtual disk with the policy configured to the disk.
+// A call to this function is made only when a user specifies VSAN storage capabilties in the storage class definition.
+func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, virtualMachine *object.VirtualMachine, volumeOptions *VolumeOptions) (string, error) {
+	var diskFormat string
+	diskFormat = diskFormatValidType[volumeOptions.DiskFormat]
+
+	vmDevices, err := virtualMachine.Device(ctx)
+	if err != nil {
+		return "", err
+	}
+	var diskControllerType = vs.cfg.Disk.SCSIControllerType
+	// find SCSI controller of particular type from VM devices
+	scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType)
+	scsiController := scsiControllersOfRequiredType[0]
+
+	kubeVolsPath := filepath.Clean(datastore.Path(VolDir)) + "/"
+	// Create a kubevols directory in the datastore if one doesn't exist.
+	err = makeDirectoryInDatastore(vs.client, datacenter, kubeVolsPath, false)
+	if err != nil && err != ErrFileAlreadyExist {
+		glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
+		return "", err
+	}
+
+	glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
+
+	vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
+	disk := vmDevices.CreateDisk(scsiController, datastore.Reference(), vmDiskPath)
+	unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
+	if err != nil {
+		glog.Errorf("cannot attach disk to VM, limit reached - %v.", err)
+		return "", err
+	}
+	*disk.UnitNumber = unitNumber
+	disk.CapacityInKB = int64(volumeOptions.CapacityKB)
+
+	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
+	backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
+
+	switch diskFormat {
+	case ThinDiskType:
+		backing.ThinProvisioned = types.NewBool(true)
+	case EagerZeroedThickDiskType:
+		backing.EagerlyScrub = types.NewBool(true)
+	default:
+		backing.ThinProvisioned = types.NewBool(false)
+	}
+
+	// Reconfigure VM
+	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
+	deviceConfigSpec := &types.VirtualDeviceConfigSpec{
+		Device:        disk,
+		Operation:     types.VirtualDeviceConfigSpecOperationAdd,
+		FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
+	}
+
+	storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{
+		ProfileId: "",
+		ProfileData: &types.VirtualMachineProfileRawData{
+			ExtensionKey: "com.vmware.vim.sps",
+			ObjectData:   volumeOptions.StorageProfileData,
+		},
+	}
+
+	deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
+	virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
+	task, err := virtualMachine.Reconfigure(ctx, virtualMachineConfigSpec)
+	if err != nil {
+		glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err)
+		return "", err
+	}
+
+	err = task.Wait(ctx)
+	if err != nil {
+		glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err)
+		return "", err
+	}
+
+	return vmDiskPath, nil
+}
+
+// creating a scsi controller as there is none found.
+func createAndAttachSCSIControllerToVM(ctx context.Context, vm *object.VirtualMachine, diskControllerType string) (types.BaseVirtualDevice, error) {
+	// Get VM device list
+	vmDevices, err := vm.Device(ctx)
+	if err != nil {
+		return nil, err
+	}
+	allSCSIControllers := getSCSIControllers(vmDevices)
+	if len(allSCSIControllers) >= SCSIControllerLimit {
+		// we reached the maximum number of controllers we can attach
+		return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
+	}
+	newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
+	if err != nil {
+		k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err))
+		return nil, err
+	}
+	configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
+	hotAndRemove := true
+	configNewSCSIController.HotAddRemove = &hotAndRemove
+	configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
+
+	// add the scsi controller to virtual machine
+	err = vm.AddDevice(context.TODO(), newSCSIController)
+	if err != nil {
+		glog.V(1).Infof("cannot add SCSI controller to vm - %v", err)
+		// attempt clean up of scsi controller
+		if vmDevices, err := vm.Device(ctx); err == nil {
+			cleanUpController(ctx, newSCSIController, vmDevices, vm)
+		}
+		return nil, err
+	}
+	return newSCSIController, nil
+}
+
+// Create a virtual disk.
+func createVirtualDisk(ctx context.Context, c *govmomi.Client, dc *object.Datacenter, ds *object.Datastore, volumeOptions *VolumeOptions) (string, error) {
+	kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
+	// Create a kubevols directory in the datastore if one doesn't exist.
+	err := makeDirectoryInDatastore(c, dc, kubeVolsPath, false)
+	if err != nil && err != ErrFileAlreadyExist {
+		glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
+		return "", err
+	}
+
+	glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
+	vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
+
+	diskFormat := diskFormatValidType[volumeOptions.DiskFormat]
+
+	// Create a virtual disk manager
+	virtualDiskManager := object.NewVirtualDiskManager(c.Client)
+
+	// Create specification for new virtual disk
+	vmDiskSpec := &types.FileBackedVirtualDiskSpec{
+		VirtualDiskSpec: types.VirtualDiskSpec{
+			AdapterType: LSILogicControllerType,
+			DiskType:    diskFormat,
+		},
+		CapacityKb: int64(volumeOptions.CapacityKB),
+	}
+
+	// Create virtual disk
+	task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec)
+	if err != nil {
+		return "", err
+	}
+	return vmDiskPath, task.Wait(ctx)
+}
+
+// Check if the provided datastore is VSAN
+func checkIfDatastoreTypeIsVSAN(c *govmomi.Client, datastore *object.Datastore) (bool, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	pc := property.DefaultCollector(c.Client)
+
+	// Convert datastores into list of references
+	var dsRefs []types.ManagedObjectReference
+	dsRefs = append(dsRefs, datastore.Reference())
+
+	// Retrieve summary property for the given datastore
+	var dsMorefs []mo.Datastore
+	err := pc.Retrieve(ctx, dsRefs, []string{"summary"}, &dsMorefs)
+	if err != nil {
+		return false, err
+	}
+
+	for _, ds := range dsMorefs {
+		if ds.Summary.Type == VSANDatastoreType {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
 // Creates a folder using the specified name.
 // If the intermediate level folders do not exist,
 // and the parameter createParents is true,
@@ -1378,3 +1783,70 @@ func makeDirectoryInDatastore(c *govmomi.Client, dc *object.Datacenter, path str
 
 	return err
 }
+
+// Get the folder for a given VM
+func getFolder(ctx context.Context, c *govmomi.Client, datacenterName string, folderName string) (*object.Folder, error) {
+	f := find.NewFinder(c.Client, true)
+
+	// Fetch and set data center
+	dc, err := f.Datacenter(ctx, datacenterName)
+	if err != nil {
+		return nil, err
+	}
+	f.SetDatacenter(dc)
+
+	folderName = strings.TrimSuffix(folderName, "/")
+	dcFolders, err := dc.Folders(ctx)
+	vmFolders, _ := dcFolders.VmFolder.Children(ctx)
+
+	var vmFolderRefs []types.ManagedObjectReference
+	for _, vmFolder := range vmFolders {
+		vmFolderRefs = append(vmFolderRefs, vmFolder.Reference())
+	}
+
+	// Get only references of type folder.
+	var folderRefs []types.ManagedObjectReference
+	for _, vmFolder := range vmFolderRefs {
+		if vmFolder.Type == "Folder" {
+			folderRefs = append(folderRefs, vmFolder)
+		}
+	}
+
+	// Find the specific folder reference matching the folder name.
+	var resultFolder *object.Folder
+	pc := property.DefaultCollector(c.Client)
+	for _, folderRef := range folderRefs {
+		var refs []types.ManagedObjectReference
+		var folderMorefs []mo.Folder
+		refs = append(refs, folderRef)
+		err = pc.Retrieve(ctx, refs, []string{"name"}, &folderMorefs)
+		for _, fref := range folderMorefs {
+			if fref.Name == folderName {
+				resultFolder = object.NewFolder(c.Client, folderRef)
+			}
+		}
+	}
+
+	return resultFolder, nil
+}
+
+// Delete the VM.
+func deleteVM(ctx context.Context, vm *object.VirtualMachine) error {
+	destroyTask, err := vm.Destroy(ctx)
+	if err != nil {
+		return err
+	}
+	return destroyTask.Wait(ctx)
+}
+
+// Remove the cluster or folder path from the vDiskPath
+// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
+// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
+func removeClusterFromVDiskPath(vDiskPath string) string {
+	datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
+	if filepath.Base(datastore) != datastore {
+		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
+	}
+	return vDiskPath
+}
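Editor's note: removeClusterFromVDiskPath strips a datastore-cluster prefix from the bracketed datastore name while leaving plain datastore paths untouched. A standalone copy of the function from the diff above, reproduced only so its behaviour can be exercised in isolation:

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

// Standalone copy of removeClusterFromVDiskPath for demonstration.
func removeClusterFromVDiskPath(vDiskPath string) string {
	datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
	if filepath.Base(datastore) != datastore {
		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
	}
	return vDiskPath
}

func main() {
	fmt.Println(removeClusterFromVDiskPath("[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// Output: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
	fmt.Println(removeClusterFromVDiskPath("[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"))
	// Output: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
}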
22 vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go generated vendored
@@ -27,10 +27,30 @@ import (
)

// GetPodTemplateWithHash returns copy of provided template with additional
// label which contains hash of provided template
// label which contains hash of provided template and sets default daemon tolerations.
func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec {
	obj, _ := api.Scheme.DeepCopy(template)
	newTemplate := obj.(v1.PodTemplateSpec)
	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
	// Add infinite toleration for taint notReady:NoExecute here
	// to survive taint-based eviction enforced by NodeController
	// when node turns not ready.
	v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
		Key:      metav1.TaintNodeNotReady,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	})

	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
	// Add infinite toleration for taint unreachable:NoExecute here
	// to survive taint-based eviction enforced by NodeController
	// when node turns unreachable.
	v1.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
		Key:      metav1.TaintNodeUnreachable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	})

	templateGenerationStr := fmt.Sprint(generation)
	newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
		template.ObjectMeta.Labels,
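The add-or-update semantics relied on here can be sketched without the Kubernetes API types. A minimal stand-in (struct fields and the taint key strings are illustrative assumptions, not the vendored definitions):

package main

import "fmt"

// Toleration is a trimmed-down stand-in for v1.Toleration.
type Toleration struct {
	Key      string
	Operator string
	Effect   string
}

// addOrUpdateToleration appends the toleration, or replaces an existing one
// with the same key, mirroring what v1.AddOrUpdateTolerationInPodSpec does above.
func addOrUpdateToleration(tolerations []Toleration, t Toleration) []Toleration {
	for i := range tolerations {
		if tolerations[i].Key == t.Key {
			tolerations[i] = t
			return tolerations
		}
	}
	return append(tolerations, t)
}

func main() {
	var tolerations []Toleration
	// Added twice in the diff: once for notReady, once for unreachable.
	tolerations = addOrUpdateToleration(tolerations, Toleration{"node.alpha.kubernetes.io/notReady", "Exists", "NoExecute"})
	tolerations = addOrUpdateToleration(tolerations, Toleration{"node.alpha.kubernetes.io/unreachable", "Exists", "NoExecute"})
	fmt.Println(tolerations)
}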
6 vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go generated vendored
@@ -52,7 +52,11 @@ import (

const (
	// maxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
	maxRetries = 5
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
	// a deployment is going to be requeued:
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries = 15
)

// controllerKind contains the schema.GroupVersionKind for this controller type.
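The requeue schedule quoted in the comment can be reproduced directly; a standalone sketch, taking the 5ms base and per-retry doubling from the comment itself:

package main

import (
	"fmt"
	"time"
)

func main() {
	// delay(n) = 5ms * 2^(n-1) for the n-th requeue, per the comment above.
	base := 5 * time.Millisecond
	for n := 1; n <= 15; n++ {
		delay := base * time.Duration(1<<uint(n-1))
		fmt.Printf("retry %2d: %v\n", n, delay)
	}
	// The last value is 81.92s, the "82s" in the comment; raising maxRetries
	// from 5 to 15 therefore extends backoff from ~80ms to ~82s.
}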
29 vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go generated vendored
@@ -31,9 +31,9 @@ import (
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func maxSurge(val int) *intstr.IntOrString {
	surge := intstr.FromInt(val)
	return &surge
func intOrStrP(val int) *intstr.IntOrString {
	intOrStr := intstr.FromInt(val)
	return &intOrStr
}

func TestScale(t *testing.T) {

@@ -218,8 +218,8 @@ func TestScale(t *testing.T) {
		},
		{
			name:          "deployment with surge pods",
			deployment:    newDeployment("foo", 20, nil, maxSurge(2), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil),
			deployment:    newDeployment("foo", 20, nil, intOrStrP(2), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil),

			newRS:  rs("foo-v2", 6, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},

@@ -229,8 +229,8 @@ func TestScale(t *testing.T) {
		},
		{
			name:          "change both surge and size",
			deployment:    newDeployment("foo", 50, nil, maxSurge(6), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil),
			deployment:    newDeployment("foo", 50, nil, intOrStrP(6), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil),

			newRS:  rs("foo-v2", 5, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},

@@ -249,6 +249,21 @@ func TestScale(t *testing.T) {
			expectedNew: nil,
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
		},
		{
			name:          "saturated but broken new replica set does not affect old pods",
			deployment:    newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
			oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),

			newRS: func() *extensions.ReplicaSet {
				rs := rs("foo-v2", 2, nil, newTimestamp)
				rs.Status.AvailableReplicas = 0
				return rs
			}(),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 2, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
		},
	}

	for _, test := range tests {
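The renamed helper just wraps apimachinery's IntOrString union, which deployments use for both maxSurge and maxUnavailable. A minimal sketch using the real package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// intOrStrP mirrors the test helper above: it returns a pointer to an
// IntOrString built from an int.
func intOrStrP(val int) *intstr.IntOrString {
	intOrStr := intstr.FromInt(val)
	return &intOrStr
}

func main() {
	surge := intOrStrP(2)
	fmt.Println(surge.String()) // "2"
	pct := intstr.FromString("25%")
	fmt.Println(pct.String()) // percentages travel as strings in the same union
}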
7 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go generated vendored
@@ -1059,7 +1059,8 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re

// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that.
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
	if rs == nil {
		return false

@@ -1069,7 +1070,9 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
	if err != nil {
		return false
	}
	return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas)
	return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) &&
		int32(desired) == *(deployment.Spec.Replicas) &&
		rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
}

// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
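The strengthened check can be restated with plain integers to show what the extra clause buys; a minimal sketch:

package main

import "fmt"

// saturated restates the check above: a replica set only counts as saturated
// when its spec size, the desired size recorded in its annotation, and its
// *available* replicas all match the deployment's desired replica count.
func saturated(deploymentReplicas, rsSpecReplicas, rsDesiredAnnotation, rsAvailable int32) bool {
	return rsSpecReplicas == deploymentReplicas &&
		rsDesiredAnnotation == deploymentReplicas &&
		rsAvailable == deploymentReplicas
}

func main() {
	// Before this change the availability term was not consulted, so a fully
	// scaled-up but crash-looping replica set was wrongly treated as saturated
	// (exactly the "saturated but broken" test case added above).
	fmt.Println(saturated(2, 2, 2, 0)) // false: pods exist but none available
	fmt.Println(saturated(2, 2, 2, 2)) // true
}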
38 vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go generated vendored
@@ -36,8 +36,9 @@ import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/util/node"
	nodepkg "k8s.io/kubernetes/pkg/util/node"
	utilversion "k8s.io/kubernetes/pkg/util/version"

	"github.com/golang/glog"

@@ -102,12 +103,12 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
	if pod.Status.Reason == node.NodeUnreachablePodReason {
	if pod.Status.Reason == nodepkg.NodeUnreachablePodReason {
		return pod, nil
	}

	pod.Status.Reason = node.NodeUnreachablePodReason
	pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
	pod.Status.Reason = nodepkg.NodeUnreachablePodReason
	pod.Status.Message = fmt.Sprintf(nodepkg.NodeUnreachablePodMessage, nodeName, pod.Name)

	var updatedPod *v1.Pod
	var err error

@@ -286,3 +287,32 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_st
	// and event is recorded or neither should happen, see issue #6055.
	recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}

// Returns true in case of success and false otherwise
func swapNodeControllerTaint(kubeClient clientset.Interface, taintToAdd, taintToRemove *v1.Taint, node *v1.Node) bool {
	taintToAdd.TimeAdded = metav1.Now()
	err := controller.AddOrUpdateTaintOnNode(kubeClient, node.Name, taintToAdd)
	if err != nil {
		utilruntime.HandleError(
			fmt.Errorf(
				"unable to taint %v unresponsive Node %q: %v",
				taintToAdd.Key,
				node.Name,
				err))
		return false
	}
	glog.V(4).Infof("Added %v Taint to Node %v", taintToAdd, node.Name)

	err = controller.RemoveTaintOffNode(kubeClient, node.Name, taintToRemove, node)
	if err != nil {
		utilruntime.HandleError(
			fmt.Errorf(
				"unable to remove %v unneeded taint from unresponsive Node %q: %v",
				taintToRemove.Key,
				node.Name,
				err))
		return false
	}
	glog.V(4).Infof("Made sure that Node %v has no %v Taint", node.Name, taintToRemove)
	return true
}
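The add-before-remove ordering in the new helper keeps the node tainted throughout the swap. A minimal stand-alone sketch of that pattern (struct and taint keys are illustrative, not the vendored definitions):

package main

import "fmt"

// Taint is a minimal stand-in for v1.Taint.
type Taint struct{ Key, Effect string }

// swapTaint mirrors the helper above: the new taint is applied first and only
// then is its opposite removed, so the node is never left untainted in between.
func swapTaint(taints []Taint, add, remove Taint) []Taint {
	out := []Taint{add}
	for _, t := range taints {
		if t.Key != remove.Key && t.Key != add.Key {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	notReady := Taint{"node.alpha.kubernetes.io/notReady", "NoExecute"}
	unreachable := Taint{"node.alpha.kubernetes.io/unreachable", "NoExecute"}
	taints := []Taint{unreachable}
	// Node condition flipped from Unknown to False: swap unreachable -> notReady.
	fmt.Println(swapTaint(taints, notReady, unreachable))
}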
177 vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go generated vendored
@@ -478,6 +478,74 @@ func NewNodeController(
	return nc, nil
}

func (nc *NodeController) doEvictionPass() {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	for k := range nc.zonePodEvictor {
		// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
		nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
			node, err := nc.nodeLister.Get(value.Value)
			if apierrors.IsNotFound(err) {
				glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
			} else if err != nil {
				glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
			} else {
				zone := utilnode.GetZoneKey(node)
				EvictionsNumber.WithLabelValues(zone).Inc()
			}
			nodeUid, _ := value.UID.(string)
			remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				glog.Infof("Pods awaiting deletion due to NodeController eviction")
			}
			return true, 0
		})
	}
}

func (nc *NodeController) doTaintingPass() {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	for k := range nc.zoneNotReadyOrUnreachableTainer {
		// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
		nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) {
			node, err := nc.nodeLister.Get(value.Value)
			if apierrors.IsNotFound(err) {
				glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
				return true, 0
			} else if err != nil {
				glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
				// retry in 50 millisecond
				return false, 50 * time.Millisecond
			} else {
				zone := utilnode.GetZoneKey(node)
				EvictionsNumber.WithLabelValues(zone).Inc()
			}
			_, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
			// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
			taintToAdd := v1.Taint{}
			oppositeTaint := v1.Taint{}
			if condition.Status == v1.ConditionFalse {
				taintToAdd = *NotReadyTaintTemplate
				oppositeTaint = *UnreachableTaintTemplate
			} else if condition.Status == v1.ConditionUnknown {
				taintToAdd = *UnreachableTaintTemplate
				oppositeTaint = *NotReadyTaintTemplate
			} else {
				// It seems that the Node is ready again, so there's no need to taint it.
				glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
				return true, 0
			}

			return swapNodeControllerTaint(nc.kubeClient, &taintToAdd, &oppositeTaint, node), 0
		})
	}
}

// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() {
	go func() {

@@ -502,101 +570,12 @@ func (nc *NodeController) Run() {
		if nc.useTaintBasedEvictions {
			// Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
			// taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
			go wait.Until(func() {
				nc.evictorLock.Lock()
				defer nc.evictorLock.Unlock()
				for k := range nc.zoneNotReadyOrUnreachableTainer {
					// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
					nc.zoneNotReadyOrUnreachableTainer[k].Try(func(value TimedValue) (bool, time.Duration) {
						node, err := nc.nodeLister.Get(value.Value)
						if apierrors.IsNotFound(err) {
							glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
							return true, 0
						} else if err != nil {
							glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
							// retry in 50 millisecond
							return false, 50 * time.Millisecond
						} else {
							zone := utilnode.GetZoneKey(node)
							EvictionsNumber.WithLabelValues(zone).Inc()
						}
						_, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
						// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
						taintToAdd := v1.Taint{}
						oppositeTaint := v1.Taint{}
						if condition.Status == v1.ConditionFalse {
							taintToAdd = *NotReadyTaintTemplate
							oppositeTaint = *UnreachableTaintTemplate
						} else if condition.Status == v1.ConditionUnknown {
							taintToAdd = *UnreachableTaintTemplate
							oppositeTaint = *NotReadyTaintTemplate
						} else {
							// It seems that the Node is ready again, so there's no need to taint it.
							glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
							return true, 0
						}

						taintToAdd.TimeAdded = metav1.Now()
						err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, value.Value, &taintToAdd)
						if err != nil {
							utilruntime.HandleError(
								fmt.Errorf(
									"unable to taint %v unresponsive Node %q: %v",
									taintToAdd.Key,
									value.Value,
									err))
							return false, 0
						} else {
							glog.V(4).Info("Added %v Taint to Node %v", taintToAdd, value.Value)
						}
						err = controller.RemoveTaintOffNode(nc.kubeClient, value.Value, &oppositeTaint, node)
						if err != nil {
							utilruntime.HandleError(
								fmt.Errorf(
									"unable to remove %v unneeded taint from unresponsive Node %q: %v",
									oppositeTaint.Key,
									value.Value,
									err))
							return false, 0
						} else {
							glog.V(4).Info("Made sure that Node %v has no %v Taint", value.Value, oppositeTaint)
						}
						return true, 0
					})
				}
			}, nodeEvictionPeriod, wait.NeverStop)
			go wait.Until(nc.doTaintingPass, nodeEvictionPeriod, wait.NeverStop)
		} else {
			// Managing eviction of nodes:
			// When we delete pods off a node, if the node was not empty at the time we then
			// queue an eviction watcher. If we hit an error, retry deletion.
			go wait.Until(func() {
				nc.evictorLock.Lock()
				defer nc.evictorLock.Unlock()
				for k := range nc.zonePodEvictor {
					// Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
					nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
						node, err := nc.nodeLister.Get(value.Value)
						if apierrors.IsNotFound(err) {
							glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
						} else if err != nil {
							glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
						} else {
							zone := utilnode.GetZoneKey(node)
							EvictionsNumber.WithLabelValues(zone).Inc()
						}
						nodeUid, _ := value.UID.(string)
						remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
						if err != nil {
							utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
							return false, 0
						}
						if remaining {
							glog.Infof("Pods awaiting deletion due to NodeController eviction")
						}
						return true, 0
					})
				}
			}, nodeEvictionPeriod, wait.NeverStop)
			go wait.Until(nc.doEvictionPass, nodeEvictionPeriod, wait.NeverStop)
		}
	}()
}

@@ -685,7 +664,13 @@ func (nc *NodeController) monitorNodeStatus() error {
			// Check eviction timeout against decisionTimestamp
			if observedReadyCondition.Status == v1.ConditionFalse {
				if nc.useTaintBasedEvictions {
					if nc.markNodeForTainting(node) {
					// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
					if v1.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
						taintToAdd := *NotReadyTaintTemplate
						if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, UnreachableTaintTemplate, node) {
							glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
						}
					} else if nc.markNodeForTainting(node) {
						glog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.",
							node.Name,
							decisionTimestamp,

@@ -706,7 +691,13 @@ func (nc *NodeController) monitorNodeStatus() error {
			}
			if observedReadyCondition.Status == v1.ConditionUnknown {
				if nc.useTaintBasedEvictions {
					if nc.markNodeForTainting(node) {
					// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
					if v1.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
						taintToAdd := *UnreachableTaintTemplate
						if !swapNodeControllerTaint(nc.kubeClient, &taintToAdd, NotReadyTaintTemplate, node) {
							glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
						}
					} else if nc.markNodeForTainting(node) {
						glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.",
							node.Name,
							decisionTimestamp,
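The Run() refactor above replaces long inline closures with named methods passed to wait.Until as method values. A minimal stdlib-only sketch of the same pattern (the `until` helper is a simplified stand-in for k8s.io/apimachinery's wait.Until):

package main

import (
	"fmt"
	"time"
)

type controller struct{ passes int }

// doPass plays the role of the extracted doEvictionPass/doTaintingPass methods.
func (c *controller) doPass() {
	c.passes++
	fmt.Println("pass", c.passes)
}

// until runs f once per period until stop is closed, like wait.Until.
func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		f()
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	c := &controller{}
	stop := make(chan struct{})
	go until(c.doPass, 10*time.Millisecond, stop) // method value, as in the diff
	time.Sleep(35 * time.Millisecond)
	close(stop)
}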
164 vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go generated vendored
@@ -74,7 +74,9 @@ func NewNodeControllerFromClient(
	clusterCIDR *net.IPNet,
	serviceCIDR *net.IPNet,
	nodeCIDRMaskSize int,
	allocateNodeCIDRs bool) (*nodeController, error) {
	allocateNodeCIDRs bool,
	useTaints bool,
) (*nodeController, error) {

	factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())

@@ -99,8 +101,8 @@ func NewNodeControllerFromClient(
		serviceCIDR,
		nodeCIDRMaskSize,
		allocateNodeCIDRs,
		false,
		false,
		useTaints,
		useTaints,
	)
	if err != nil {
		return nil, err

@@ -549,7 +551,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
	for _, item := range table {
		nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler,
			evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
		nodeController.now = func() metav1.Time { return fakeNow }
		nodeController.recorder = testutil.NewFakeRecorder()
		for _, ds := range item.daemonSets {

@@ -698,7 +700,7 @@ func TestPodStatusChange(t *testing.T) {
	for _, item := range table {
		nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler,
			evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
		nodeController.now = func() metav1.Time { return fakeNow }
		nodeController.recorder = testutil.NewFakeRecorder()
		if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {

@@ -1215,7 +1217,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
		}
		nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler,
			evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
		nodeController.now = func() metav1.Time { return fakeNow }
		nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 {
			return testRateLimiterQPS

@@ -1310,7 +1312,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
	nodeController, _ := NewNodeControllerFromClient(nil, fnh, 10*time.Minute,
		testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
		testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
		testNodeMonitorPeriod, nil, nil, 0, false)
		testNodeMonitorPeriod, nil, nil, 0, false, false)
	nodeController.cloud = &fakecloud.FakeCloud{}
	nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
	nodeController.recorder = testutil.NewFakeRecorder()

@@ -1579,7 +1581,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
	for i, item := range table {
		nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
			testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
		nodeController.now = func() metav1.Time { return fakeNow }
		nodeController.recorder = testutil.NewFakeRecorder()
		if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {

@@ -1813,7 +1815,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
	for i, item := range table {
		nodeController, _ := NewNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
			testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
		nodeController.now = func() metav1.Time { return fakeNow }
		nodeController.recorder = testutil.NewFakeRecorder()
		if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {

@@ -1845,6 +1847,146 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
	}
}

func TestSwapUnreachableNotReadyTaints(t *testing.T) {
	fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
	evictionTimeout := 10 * time.Minute

	fakeNodeHandler := &testutil.FakeNodeHandler{
		Existing: []*v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					Labels: map[string]string{
						metav1.LabelZoneRegion:        "region1",
						metav1.LabelZoneFailureDomain: "zone1",
					},
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionUnknown,
							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
			// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
			// we need second healthy node in tests. Because of how the tests are written we need to update
			// the status of this Node.
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "node1",
					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					Labels: map[string]string{
						metav1.LabelZoneRegion:        "region1",
						metav1.LabelZoneFailureDomain: "zone1",
					},
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionTrue,
							LastHeartbeatTime:  metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
		},
		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
	}
	timeToPass := evictionTimeout
	newNodeStatus := v1.NodeStatus{
		Conditions: []v1.NodeCondition{
			{
				Type:   v1.NodeReady,
				Status: v1.ConditionFalse,
				// Node status has just been updated, and is NotReady for 10min.
				LastHeartbeatTime:  metav1.Date(2017, 1, 1, 12, 9, 0, 0, time.UTC),
				LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
			},
		},
	}
	healthyNodeNewStatus := v1.NodeStatus{
		Conditions: []v1.NodeCondition{
			{
				Type:               v1.NodeReady,
				Status:             v1.ConditionTrue,
				LastHeartbeatTime:  metav1.Date(2017, 1, 1, 12, 10, 0, 0, time.UTC),
				LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
			},
		},
	}
	originalTaint := UnreachableTaintTemplate
	updatedTaint := NotReadyTaintTemplate

	nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler,
		evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold, testNodeMonitorGracePeriod,
		testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
	nodeController.now = func() metav1.Time { return fakeNow }
	nodeController.recorder = testutil.NewFakeRecorder()
	if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doTaintingPass()

	node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{})
	if err != nil {
		t.Errorf("Can't get current node0...")
		return
	}
	node1, err := fakeNodeHandler.Get("node1", metav1.GetOptions{})
	if err != nil {
		t.Errorf("Can't get current node1...")
		return
	}

	if originalTaint != nil && !v1.TaintExists(node0.Spec.Taints, originalTaint) {
		t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints)
	}

	nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(timeToPass)} }

	node0.Status = newNodeStatus
	node1.Status = healthyNodeNewStatus
	_, err = fakeNodeHandler.UpdateStatus(node0)
	if err != nil {
		t.Errorf(err.Error())
		return
	}
	_, err = fakeNodeHandler.UpdateStatus(node1)
	if err != nil {
		t.Errorf(err.Error())
		return
	}

	if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doTaintingPass()

	node0, err = fakeNodeHandler.Get("node0", metav1.GetOptions{})
	if err != nil {
		t.Errorf("Can't get current node0...")
		return
	}
	if updatedTaint != nil {
		if !v1.TaintExists(node0.Spec.Taints, updatedTaint) {
			t.Errorf("Can't find taint %v in %v", updatedTaint, node0.Spec.Taints)
		}
	}
}

func TestNodeEventGeneration(t *testing.T) {
	fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC)
	fakeNodeHandler := &testutil.FakeNodeHandler{

@@ -1876,7 +2018,7 @@ func TestNodeEventGeneration(t *testing.T) {
	nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute,
		testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
		testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
		testNodeMonitorPeriod, nil, nil, 0, false)
		testNodeMonitorPeriod, nil, nil, 0, false, false)
	nodeController.cloud = &fakecloud.FakeCloud{}
	nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
		return false, nil

@@ -1987,7 +2129,7 @@ func TestCheckPod(t *testing.T) {
		},
	}

	nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
	nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false, false)
	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "new",
2 vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD generated vendored
@@ -17,6 +17,7 @@ go_library(
        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/util/node:go_default_library",
        "//vendor:github.com/evanphx/json-patch",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/api/resource",

@@ -24,6 +25,7 @@ go_library(
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/strategicpatch",
        "//vendor:k8s.io/apimachinery/pkg/watch",
        "//vendor:k8s.io/client-go/pkg/api/v1",
        "//vendor:k8s.io/client-go/util/clock",
105 vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
package testutil

import (
	"encoding/json"
	"errors"
	"fmt"
	"sync"

@@ -28,16 +29,19 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/watch"

	clientv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/util/clock"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
	utilnode "k8s.io/kubernetes/pkg/util/node"

	"github.com/evanphx/json-patch"
	"github.com/golang/glog"
)

@@ -189,6 +193,7 @@ func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
		m.RequestCount++
		m.lock.Unlock()
	}()

	nodeCopy := *node
	for i, updateNode := range m.UpdatedNodes {
		if updateNode.Name == nodeCopy.Name {

@@ -207,6 +212,35 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
		m.RequestCount++
		m.lock.Unlock()
	}()

	var origNodeCopy v1.Node
	found := false
	for i := range m.Existing {
		if m.Existing[i].Name == node.Name {
			origNodeCopy = *m.Existing[i]
			found = true
		}
	}
	updatedNodeIndex := -1
	for i := range m.UpdatedNodes {
		if m.UpdatedNodes[i].Name == node.Name {
			origNodeCopy = *m.UpdatedNodes[i]
			updatedNodeIndex = i
			found = true
		}
	}

	if !found {
		return nil, fmt.Errorf("Not found node %v", node)
	}

	origNodeCopy.Status = node.Status
	if updatedNodeIndex < 0 {
		m.UpdatedNodes = append(m.UpdatedNodes, &origNodeCopy)
	} else {
		m.UpdatedNodes[updatedNodeIndex] = &origNodeCopy
	}

	nodeCopy := *node
	m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
	return node, nil

@@ -225,7 +259,76 @@ func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error

// Patch patches a Node in the fake store.
func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
	return nil, nil
	m.lock.Lock()
	defer func() {
		m.RequestCount++
		m.lock.Unlock()
	}()
	var nodeCopy v1.Node
	for i := range m.Existing {
		if m.Existing[i].Name == name {
			nodeCopy = *m.Existing[i]
		}
	}
	updatedNodeIndex := -1
	for i := range m.UpdatedNodes {
		if m.UpdatedNodes[i].Name == name {
			nodeCopy = *m.UpdatedNodes[i]
			updatedNodeIndex = i
		}
	}

	originalObjJS, err := json.Marshal(nodeCopy)
	if err != nil {
		glog.Errorf("Failed to marshal %v", nodeCopy)
		return nil, nil
	}
	var originalNode v1.Node
	if err = json.Unmarshal(originalObjJS, &originalNode); err != nil {
		glog.Errorf("Failed to unmarshall original object: %v", err)
		return nil, nil
	}

	var patchedObjJS []byte
	switch pt {
	case types.JSONPatchType:
		patchObj, err := jsonpatch.DecodePatch(data)
		if err != nil {
			glog.Error(err.Error())
			return nil, nil
		}
		if patchedObjJS, err = patchObj.Apply(originalObjJS); err != nil {
			glog.Error(err.Error())
			return nil, nil
		}
	case types.MergePatchType:
		if patchedObjJS, err = jsonpatch.MergePatch(originalObjJS, data); err != nil {
			glog.Error(err.Error())
			return nil, nil
		}
	case types.StrategicMergePatchType:
		if patchedObjJS, err = strategicpatch.StrategicMergePatch(originalObjJS, data, originalNode); err != nil {
			glog.Error(err.Error())
			return nil, nil
		}
	default:
		glog.Errorf("unknown Content-Type header for patch: %v", pt)
		return nil, nil
	}

	var updatedNode v1.Node
	if err = json.Unmarshal(patchedObjJS, &updatedNode); err != nil {
		glog.Errorf("Failed to unmarshall patched object: %v", err)
		return nil, nil
	}

	if updatedNodeIndex < 0 {
		m.UpdatedNodes = append(m.UpdatedNodes, &updatedNode)
	} else {
		m.UpdatedNodes[updatedNodeIndex] = &updatedNode
	}

	return &updatedNode, nil
}

// FakeRecorder is used as a fake during testing.
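The merge-patch branch of the new fake uses the evanphx/json-patch library directly; a minimal sketch of what that call does, with an illustrative JSON document:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"metadata":{"name":"node0","labels":{"zone":"a"}}}`)
	// An RFC 7386 merge patch, the types.MergePatchType case above.
	patch := []byte(`{"metadata":{"labels":{"zone":"b"}}}`)

	patched, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	// labels.zone is now "b"; fields absent from the patch are untouched.
	fmt.Println(string(patched))
}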
1 vendor/k8s.io/kubernetes/pkg/controller/service/BUILD generated vendored
@@ -16,6 +16,7 @@ go_library(
    ],
    tags = ["automanaged"],
    deps = [
        "//cmd/kubeadm/app/constants:go_default_library",
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
9 vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go generated vendored
@@ -34,6 +34,7 @@ import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

@@ -617,10 +618,16 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
	return func(node *v1.Node) bool {
		// We add the master to the node list, but its unschedulable. So we use this to filter
		// the master.
		// TODO: Use a node annotation to indicate the master
		if node.Spec.Unschedulable {
			return false
		}

		// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
		// Recognize nodes labeled as master, and filter them also, as we were doing previously.
		if _, hasMasterRoleLabel := node.Labels[constants.LabelNodeRoleMaster]; hasMasterRoleLabel {
			return false
		}

		// If we have no info, don't accept
		if len(node.Status.Conditions) == 0 {
			return false
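The new filter can be sketched without the Kubernetes types; the label key below follows kubeadm's node-role convention and should be treated as illustrative rather than the vendored constant:

package main

import "fmt"

// node is a minimal stand-in for v1.Node, carrying just the fields the
// predicate above consults.
type node struct {
	Unschedulable bool
	Labels        map[string]string
	HasConditions bool
}

// eligibleForLoadBalancer mirrors the filter: skip unschedulable nodes, skip
// nodes carrying the master role label, skip nodes reporting no status.
func eligibleForLoadBalancer(n node) bool {
	if n.Unschedulable {
		return false
	}
	if _, isMaster := n.Labels["node-role.kubernetes.io/master"]; isMaster {
		return false
	}
	return n.HasConditions
}

func main() {
	master := node{Labels: map[string]string{"node-role.kubernetes.io/master": ""}, HasConditions: true}
	worker := node{Labels: map[string]string{}, HasConditions: true}
	fmt.Println(eligibleForLoadBalancer(master), eligibleForLoadBalancer(worker)) // false true
}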
7 vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control.go generated vendored
@@ -132,6 +132,13 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
		if !isCreated(replicas[i]) {
			return ssc.podControl.CreateStatefulPod(set, replicas[i])
		}
		// If we find a Pod that is currently terminating, we must wait until graceful deletion
		// completes before we continue to make progress.
		if isTerminating(replicas[i]) {
			glog.V(2).Infof("StatefulSet %s is waiting for Pod %s to Terminate",
				set.Name, replicas[i].Name)
			return nil
		}
		// If we have a Pod that has been created but is not running and ready we can not make progress.
		// We must ensure that all for each Pod, when we create it, all of its predecessors, with respect to its
		// ordinal, are Running and Ready.
70 vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_control_test.go generated vendored
@@ -403,6 +403,72 @@ func TestDefaultStatefulSetControlUpdatePodFailure(t *testing.T) {
	}
}

func TestDefaultStatefulSetControlBlocksOnTerminating(t *testing.T) {
	set := newStatefulSet(3)
	client := fake.NewSimpleClientset(set)

	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
	spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets())
	ssc := NewDefaultStatefulSetControl(spc)
	spc.SetUpdateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0)

	stop := make(chan struct{})
	defer close(stop)
	informerFactory.Start(stop)
	cache.WaitForCacheSync(
		stop,
		informerFactory.Apps().V1beta1().StatefulSets().Informer().HasSynced,
		informerFactory.Core().V1().Pods().Informer().HasSynced,
	)

	if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var err error
	set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
	if err != nil {
		t.Fatalf("Error getting updated StatefulSet: %v", err)
	}
	if set.Status.Replicas != 3 {
		t.Fatal("Failed to scale StatefulSet to 3 replicas")
	}
	// scale the set and add a terminated pod
	*set.Spec.Replicas = 4
	pods, err := spc.addTerminatingPod(set, 2)
	if err != nil {
		t.Fatal(err)
	}
	if err := ssc.UpdateStatefulSet(set, pods); err != nil {
		t.Fatal(err)
	}
	pods, err = spc.podsLister.List(labels.Everything())
	if err != nil {
		t.Fatalf("Error listing pods: %v", err)
	}
	if len(pods) != 3 {
		t.Fatalf("Expected 3 pods, got %d", len(pods))
	}
	sort.Sort(ascendingOrdinal(pods))
	spc.DeleteStatefulPod(set, pods[2])
	pods, err = spc.podsLister.List(labels.Everything())
	if err != nil {
		t.Fatalf("Error listing pods: %v", err)
	}
	if len(pods) != 2 {
		t.Fatalf("Expected 3 pods, got %d", len(pods))
	}
	if err := scaleUpStatefulSetControl(set, ssc, spc); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
	if err != nil {
		t.Fatalf("Error getting updated StatefulSet: %v", err)
	}
	if set.Status.Replicas != 4 {
		t.Fatal("Failed to scale StatefulSet to 3 replicas")
	}
}

func TestDefaultStatefulSetControlUpdateSetStatusFailure(t *testing.T) {
	set := newStatefulSet(3)
	client := fake.NewSimpleClientset(set)

@@ -690,7 +756,7 @@ func (spc *fakeStatefulPodControl) setPodInitStatus(set *apps.StatefulSet, ordin
	return spc.podsLister.Pods(set.Namespace).List(selector)
}

func (spc *fakeStatefulPodControl) addTerminatedPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) {
func (spc *fakeStatefulPodControl) addTerminatingPod(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) {
	pod := newStatefulSetPod(set, ordinal)
	pod.Status.Phase = v1.PodRunning
	deleted := metav1.NewTime(time.Now())

@@ -906,7 +972,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn
		if err != nil {
			return err
		}
		if pods, err = spc.addTerminatedPod(set, ordinal); err != nil {
		if pods, err = spc.addTerminatingPod(set, ordinal); err != nil {
			return err
		}
		if err = ssc.UpdateStatefulSet(set, pods); err != nil {
8 vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_test.go generated vendored
@@ -91,11 +91,11 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
	if set.Status.Replicas != 3 {
		t.Error("Falied to scale statefulset to 3 replicas")
	}
	pods, err := spc.addTerminatedPod(set, 3)
	pods, err := spc.addTerminatingPod(set, 3)
	if err != nil {
		t.Error(err)
	}
	pods, err = spc.addTerminatedPod(set, 4)
	pods, err = spc.addTerminatingPod(set, 4)
	if err != nil {
		t.Error(err)
	}

@@ -669,7 +669,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr
	spc.setsIndexer.Add(set)
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)
	pods, err = spc.addTerminatedPod(set, ord)
	pods, err = spc.addTerminatingPod(set, ord)
	pod = getPodAtOrdinal(pods, ord)
	ssc.updatePod(&prev, pod)
	fakeWorker(ssc)

@@ -679,7 +679,7 @@ func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetContr
	for set.Status.Replicas > *set.Spec.Replicas {
		pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
		ord := len(pods)
		pods, err = spc.addTerminatedPod(set, ord)
		pods, err = spc.addTerminatingPod(set, ord)
		pod = getPodAtOrdinal(pods, ord)
		ssc.updatePod(&prev, pod)
		fakeWorker(ssc)
6 vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go generated vendored
@@ -221,14 +221,14 @@ func isFailed(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed
}

// isTerminated returns true if pod's deletion Timestamp has been set
func isTerminated(pod *v1.Pod) bool {
// isTerminating returns true if pod's DeletionTimestamp has been set
func isTerminating(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

// isHealthy returns true if pod is running and ready and has not been terminated
func isHealthy(pod *v1.Pod) bool {
	return isRunningAndReady(pod) && !isTerminated(pod)
	return isRunningAndReady(pod) && !isTerminating(pod)
}

// newControllerRef returns an ControllerRef pointing to a given StatefulSet.
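The rename captures a real distinction: a pod with a non-nil DeletionTimestamp is being gracefully deleted ("terminating", in progress), not "terminated" (finished). A minimal sketch with a stand-in pod type:

package main

import (
	"fmt"
	"time"
)

// pod is a minimal stand-in for v1.Pod.
type pod struct {
	RunningAndReady   bool
	DeletionTimestamp *time.Time
}

func isTerminating(p pod) bool { return p.DeletionTimestamp != nil }

func isHealthy(p pod) bool { return p.RunningAndReady && !isTerminating(p) }

func main() {
	now := time.Now()
	deleting := pod{RunningAndReady: true, DeletionTimestamp: &now}
	// Still Running and Ready, yet not healthy: deletion is already underway,
	// which is why UpdateStatefulSet above blocks on such pods.
	fmt.Println(isHealthy(deleting)) // false
}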
2 vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go generated vendored
@@ -12227,7 +12227,7 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope
				},
				"concurrencyPolicy": {
					SchemaProps: spec.SchemaProps{
						Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
						Description: "ConcurrencyPolicy specifies how to treat concurrent executions of a Job. Defaults to Allow.",
						Type:        []string{"string"},
						Format:      "",
					},
7 vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authorization.go generated vendored
@@ -75,14 +75,17 @@ func (s *BuiltInAuthorizationOptions) AddFlags(fs *pflag.FlagSet) {

}

func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig {
func (s *BuiltInAuthorizationOptions) Modes() []string {
	modes := []string{}
	if len(s.Mode) > 0 {
		modes = strings.Split(s.Mode, ",")
	}
	return modes
}

func (s *BuiltInAuthorizationOptions) ToAuthorizationConfig(informerFactory informers.SharedInformerFactory) authorizer.AuthorizationConfig {
	return authorizer.AuthorizationConfig{
		AuthorizationModes:        modes,
		AuthorizationModes:        s.Modes(),
		PolicyFile:                s.PolicyFile,
		WebhookConfigFile:         s.WebhookConfigFile,
		WebhookCacheAuthorizedTTL: s.WebhookCacheAuthorizedTTL,
2 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go generated vendored
@@ -85,7 +85,7 @@ func (m *containerManager) doWork() {
		glog.Errorf("Unable to get docker version: %v", err)
		return
	}
	version, err := utilversion.ParseSemantic(v.Version)
	version, err := utilversion.ParseGeneric(v.Version)
	if err != nil {
		glog.Errorf("Unable to parse docker version %q: %v", v.Version, err)
		return
	}
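The switch from strict semver parsing to a generic parser matters for Docker versions like "17.03.1-ce", whose zero-padded minor component is not valid semver. A rough standalone sketch of the lenient behavior (the parseGeneric helper below is an illustration, not the vendored utilversion implementation):

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// parseGeneric takes the leading dotted-number run and ignores any suffix.
// "17.03.1-ce" fails strict semver (leading zero in "03") but parses fine here.
func parseGeneric(s string) ([]int, error) {
	m := regexp.MustCompile(`^(\d+(?:\.\d+)*)`).FindString(s)
	if m == "" {
		return nil, fmt.Errorf("could not parse %q", s)
	}
	var parts []int
	for _, p := range strings.Split(m, ".") {
		n, _ := strconv.Atoi(p)
		parts = append(parts, n)
	}
	return parts, nil
}

func main() {
	v, err := parseGeneric("17.03.1-ce")
	fmt.Println(v, err) // [17 3 1] <nil>
}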
3 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go generated vendored
@@ -388,6 +388,9 @@ func (ds *dockerService) getDockerAPIVersion() (*semver.Version, error) {
	} else {
		dv, err = ds.getDockerVersion()
	}
	if err != nil {
		return nil, err
	}

	apiVersion, err := semver.Parse(dv.APIVersion)
	if err != nil {
1 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go generated vendored
@@ -163,7 +163,6 @@ func modifyHostNetworkOptionForContainer(hostNetwork bool, sandboxID string, hc
	hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
	hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
	hc.UTSMode = ""
	hc.PidMode = ""

	if hostNetwork {
		hc.UTSMode = namespaceModeHost
1 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go generated vendored
@@ -306,6 +306,7 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
			expected: &dockercontainer.HostConfig{
				NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
				IpcMode:     dockercontainer.IpcMode(sandboxNSMode),
				PidMode:     namespaceModeHost,
			},
		},
	}
31 vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go generated vendored
@@ -371,23 +371,22 @@ func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceNam
	for _, nodeReclaimFunc := range nodeReclaimFuncs {
		// attempt to reclaim the pressured resource.
		reclaimed, err := nodeReclaimFunc()
		if err == nil {
			// update our local observations based on the amount reported to have been reclaimed.
			// note: this is optimistic, other things could have been still consuming the pressured resource in the interim.
			signal := resourceToSignal[resourceToReclaim]
			value, ok := observations[signal]
			if !ok {
				glog.Errorf("eviction manager: unable to find value associated with signal %v", signal)
				continue
			}
			value.available.Add(*reclaimed)
		if err != nil {
			glog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err)
		}
		// update our local observations based on the amount reported to have been reclaimed.
		// note: this is optimistic, other things could have been still consuming the pressured resource in the interim.
		signal := resourceToSignal[resourceToReclaim]
		value, ok := observations[signal]
		if !ok {
			glog.Errorf("eviction manager: unable to find value associated with signal %v", signal)
			continue
		}
		value.available.Add(*reclaimed)

			// evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals
			if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 {
				return true
			}
		} else {
			glog.Errorf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err)
		// evaluate all current thresholds to see if with adjusted observations, we think we have met min reclaim goals
		if len(thresholdsMet(m.thresholdsMet, observations, true)) == 0 {
			return true
		}
	}
	return false
5 vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go generated vendored
@@ -995,13 +995,10 @@ func deleteImages(imageGC ImageGC, reportBytesFreed bool) nodeReclaimFunc {
	return func() (*resource.Quantity, error) {
		glog.Infof("eviction manager: attempting to delete unused images")
		bytesFreed, err := imageGC.DeleteUnusedImages()
		if err != nil {
			return nil, err
		}
		reclaimed := int64(0)
		if reportBytesFreed {
			reclaimed = bytesFreed
		}
		return resource.NewQuantity(reclaimed, resource.BinarySI), nil
		return resource.NewQuantity(reclaimed, resource.BinarySI), err
	}
}
5 vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go generated vendored
@@ -76,7 +76,8 @@ type NodeProvider interface {

// ImageGC is responsible for performing garbage collection of unused images.
type ImageGC interface {
	// DeleteUnusedImages deletes unused images and returns the number of bytes freed, or an error.
	// DeleteUnusedImages deletes unused images and returns the number of bytes freed, and an error.
	// This returns the bytes freed even if an error is returned.
	DeleteUnusedImages() (int64, error)
}

@@ -118,6 +119,8 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time
type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time

// nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods.
// Returns the quantity of resources reclaimed and an error, if applicable.
// nodeReclaimFunc return the resources reclaimed even if an error occurs.
type nodeReclaimFunc func() (*resource.Quantity, error)

// nodeReclaimFuncs is an ordered list of nodeReclaimFunc
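The revised contract in this hunk and the two above is "report progress even on error". A minimal sketch of that shape (the partial-failure scenario is hypothetical):

package main

import (
	"errors"
	"fmt"
)

// reclaimFunc mirrors the revised contract: report the bytes actually freed
// even when the operation also returns an error, so the caller can credit
// partial progress instead of discarding it.
type reclaimFunc func() (bytesFreed int64, err error)

func main() {
	gc := reclaimFunc(func() (int64, error) {
		// Hypothetical partial failure: some images were removed before an error.
		return 512 << 20, errors.New("one image is still in use")
	})
	freed, err := gc()
	// The old behavior dropped `freed` whenever err was non-nil; the new
	// contract keeps it and merely logs the error.
	fmt.Printf("freed %d bytes (err: %v)\n", freed, err)
}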
2 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go generated vendored
@@ -2053,7 +2053,7 @@ func (kl *Kubelet) updateRuntimeUp() {
	}
	// Only check specific conditions when runtime integration type is cri,
	// because the old integration doesn't populate any runtime condition.
	if kl.kubeletConfiguration.EnableCRI {
	if kl.kubeletConfiguration.EnableCRI && kl.kubeletConfiguration.ContainerRuntime != "rkt" {
		if s == nil {
			glog.Errorf("Container runtime status is nil")
			return
26 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go generated vendored
@@ -55,6 +55,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/status"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/term"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"

@@ -135,7 +136,32 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
			return nil, err
		}
		if mount.SubPath != "" {
			fileinfo, err := os.Lstat(hostPath)
			if err != nil {
				return nil, err
			}
			perm := fileinfo.Mode()

			hostPath = filepath.Join(hostPath, mount.SubPath)

			if subPathExists, err := util.FileExists(hostPath); err != nil {
				glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
			} else if !subPathExists {
				// Create the sub path now because if it's auto-created later when referenced, it may have an
				// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
				// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
				// later auto-create it with the incorrect mode 0750
				if err := os.MkdirAll(hostPath, perm); err != nil {
					glog.Errorf("failed to mkdir:%s", hostPath)
					return nil, err
				}

				// chmod the sub path because umask may have prevented us from making the sub path with the same
				// permissions as the mounter path
				if err := os.Chmod(hostPath, perm); err != nil {
					return nil, err
				}
			}
		}

		// Docker Volume Mounts fail on Windows if it is not of the form C:/
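The MkdirAll-then-Chmod sequence above exists because MkdirAll's mode argument is filtered through the process umask. A standalone sketch of the same fix (the helper name and temp-dir harness are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// mkdirWithParentPerms creates a child directory with its parent's mode, then
// chmods explicitly because MkdirAll's mode is subject to umask (typically 022).
func mkdirWithParentPerms(parent, sub string) (string, error) {
	info, err := os.Lstat(parent)
	if err != nil {
		return "", err
	}
	perm := info.Mode()
	child := filepath.Join(parent, sub)
	if err := os.MkdirAll(child, perm); err != nil {
		return "", err
	}
	// Without this, a 0777 parent would yield a 0755 child under umask 022.
	if err := os.Chmod(child, perm); err != nil {
		return "", err
	}
	return child, nil
}

func main() {
	parent, _ := os.MkdirTemp("", "vol")
	defer os.RemoveAll(parent)
	os.Chmod(parent, 0777)
	child, err := mkdirWithParentPerms(parent, "subpath")
	if err != nil {
		panic(err)
	}
	info, _ := os.Stat(child)
	fmt.Printf("%s -> %v\n", child, info.Mode().Perm()) // 0777 regardless of umask
}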
8 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go generated vendored
@ -357,10 +357,10 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag
|
|||
|
||||
// readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented
|
||||
// by path. It reads up to max log lines.
|
||||
func readLastStringFromContainerLogs(path string) string {
|
||||
func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string {
|
||||
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
|
||||
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
|
||||
if err := ReadLogs(path, &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
|
||||
if err := m.ReadLogs(path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
|
||||
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
|
||||
}
|
||||
return buf.String()
|
||||
|
@ -414,7 +414,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
|
|||
tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
|
||||
if checkLogs {
|
||||
path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount)
|
||||
tMessage = readLastStringFromContainerLogs(path)
|
||||
tMessage = m.readLastStringFromContainerLogs(path)
|
||||
}
|
||||
// Use the termination message written by the application is not empty
|
||||
if len(tMessage) != 0 {
|
||||
|
@@ -688,7 +688,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
	labeledInfo := getContainerInfoFromLabels(status.Labels)
	annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
	path := buildFullContainerLogsPath(pod.UID, labeledInfo.ContainerName, annotatedInfo.RestartCount)
	return ReadLogs(path, logOptions, stdout, stderr)
	return m.ReadLogs(path, containerID.ID, logOptions, stdout, stderr)
}

// GetExec gets the endpoint the runtime will serve the exec request from.
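Aside: the termination-message fallback in the hunks above bounds how much log it keeps by writing through a fixed-size circular buffer. Here is a minimal sketch using github.com/armon/circbuf, the same library the vendored code imports; the buffer size and inputs are invented for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/armon/circbuf"
)

func main() {
	// Keep only the last 64 bytes of whatever is written, mirroring how the
	// kubelet caps termination messages read back from container logs.
	buf, _ := circbuf.NewBuffer(64)

	// Anything that writes to an io.Writer can be pointed at the buffer;
	// here we write a long stream directly.
	fmt.Fprint(buf, strings.Repeat("spam ", 100))
	fmt.Fprint(buf, "final words")

	fmt.Println(buf.String()) // ends with "final words"; older bytes were dropped
}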
71 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go generated vendored
@@ -32,6 +32,7 @@ import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api/v1"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
	"k8s.io/kubernetes/pkg/util/tail"
)
@@ -54,6 +55,11 @@ const (
	timeFormat = time.RFC3339Nano
	// blockSize is the block size used in tail.
	blockSize = 1024

	// stateCheckPeriod is the period to check container state while following
	// the container log. Kubelet should not keep following the log when the
	// container is not running.
	stateCheckPeriod = 5 * time.Second
)

var (
@@ -110,7 +116,9 @@ func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions {
}

// ReadLogs read the container log and redirect into stdout and stderr.
func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Note that containerID is only needed when following the log, or else
// just pass in empty string "".
func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open log file %q: %v", path, err)
@@ -166,8 +174,8 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer)
			}
		}
		// Wait until the next log change.
		if err := waitLogs(watcher); err != nil {
			return fmt.Errorf("failed to wait logs for log file %q: %v", path, err)
		if found, err := m.waitLogs(containerID, watcher); !found {
			return err
		}
		continue
	}
@@ -196,6 +204,41 @@ func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer)
	}
}

// waitLogs wait for the next log write. It returns a boolean and an error. The boolean
// indicates whether a new log is found; the error is error happens during waiting new logs.
func (m *kubeGenericRuntimeManager) waitLogs(id string, w *fsnotify.Watcher) (bool, error) {
	errRetry := 5
	for {
		select {
		case e := <-w.Events:
			switch e.Op {
			case fsnotify.Write:
				return true, nil
			default:
				glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
			}
		case err := <-w.Errors:
			glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
			if errRetry == 0 {
				return false, err
			}
			errRetry--
		case <-time.After(stateCheckPeriod):
			s, err := m.runtimeService.ContainerStatus(id)
			if err != nil {
				return false, err
			}
			// Only keep following container log when it is running.
			if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
				glog.Errorf("Container %q is not running (state=%q)", id, s.State)
				// Do not return error because it's normal that the container stops
				// during waiting.
				return false, nil
			}
		}
	}
}

// parseFunc is a function parsing one log line to the internal log type.
// Notice that the caller must make sure logMessage is not nil.
type parseFunc func([]byte, *logMessage) error
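Aside: the essence of the new waitLogs is a three-way select that races write notifications against a periodic liveness probe. A hedged standalone sketch of that loop, with an arbitrary isAlive callback standing in for the CRI ContainerStatus call; the path, the period, and all names are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/fsnotify/fsnotify"
)

// followFile blocks until the file is written again, but wakes up
// periodically and stops once the producer (isAlive) is gone, which the
// hunk above treats as a normal end of the log rather than an error.
func followFile(path string, isAlive func() bool) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()
	if err := w.Add(path); err != nil {
		return err
	}
	for {
		select {
		case e := <-w.Events:
			if e.Op&fsnotify.Write != 0 {
				fmt.Println("new data:", e.Name) // a real reader would consume lines here
			}
		case err := <-w.Errors:
			return err
		case <-time.After(5 * time.Second):
			if !isAlive() {
				return nil // producer stopped; stop following, not an error
			}
		}
	}
}

func main() {
	deadline := time.Now().Add(12 * time.Second)
	_ = followFile("/tmp/demo.log", func() bool { return time.Now().Before(deadline) })
}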
@@ -267,28 +310,6 @@ func getParseFunc(log []byte) (parseFunc, error) {
	return nil, fmt.Errorf("unsupported log format: %q", log)
}

// waitLogs wait for the next log write.
func waitLogs(w *fsnotify.Watcher) error {
	errRetry := 5
	for {
		select {
		case e := <-w.Events:
			switch e.Op {
			case fsnotify.Write:
				return nil
			default:
				glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
			}
		case err := <-w.Errors:
			glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
			if errRetry == 0 {
				return err
			}
			errRetry--
		}
	}
}

// logWriter controls the writing into the stream based on the log options.
type logWriter struct {
	stdout io.Writer
18 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go generated vendored
@@ -41,24 +41,24 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
	}

	// set namespace options and supplemental groups.
	podSc := pod.Spec.SecurityContext
	if podSc == nil {
		return synthesized
	}
	synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{
		HostNetwork: pod.Spec.HostNetwork,
		HostIpc:     pod.Spec.HostIPC,
		HostPid:     pod.Spec.HostPID,
	}
	if podSc.FSGroup != nil {
		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
	podSc := pod.Spec.SecurityContext
	if podSc != nil {
		if podSc.FSGroup != nil {
			synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
		}

		if podSc.SupplementalGroups != nil {
			synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...)
		}
	}
	if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, groups...)
	}
	if podSc.SupplementalGroups != nil {
		synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, podSc.SupplementalGroups...)
	}

	return synthesized
}
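Aside: the reshaped block folds the optional pod-level SecurityContext behind a single nil check, so the runtime helper's extra groups are appended regardless of whether a SecurityContext exists. A simplified sketch of that accumulation pattern; the types are reduced stand-ins, not the real API objects.

package main

import "fmt"

// podSecurity stands in for v1.PodSecurityContext; only the fields the hunk
// touches are modeled.
type podSecurity struct {
	FSGroup            *int64
	SupplementalGroups []int64
}

// collectGroups mirrors the new control flow: pod-level groups are gathered
// only when a SecurityContext exists, while extra groups are always appended.
func collectGroups(sc *podSecurity, extra []int64) []int64 {
	var out []int64
	if sc != nil {
		if sc.FSGroup != nil {
			out = append(out, *sc.FSGroup)
		}
		out = append(out, sc.SupplementalGroups...)
	}
	if len(extra) > 0 {
		out = append(out, extra...)
	}
	return out
}

func main() {
	fs := int64(2000)
	fmt.Println(collectGroups(&podSecurity{FSGroup: &fs, SupplementalGroups: []int64{3000}}, []int64{4000}))
	fmt.Println(collectGroups(nil, []int64{4000})) // extra groups survive a nil SecurityContext
}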
6 vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go generated vendored
@@ -209,7 +209,11 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error {

// StopContainer stops a running container with a grace period (i.e., timeout).
func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) error {
	ctx, cancel := getContextWithTimeout(r.timeout)
	ctx, cancel := getContextWithTimeout(time.Duration(timeout) * time.Second)
	if timeout == 0 {
		// Use default timeout if stop timeout is 0.
		ctx, cancel = getContextWithTimeout(r.timeout)
	}
	defer cancel()

	_, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{
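Aside: the timeout selection added here is easy to restate in isolation; a sketch using only the standard library, where defaultTimeout is an illustrative stand-in for the client's r.timeout.

package main

import (
	"context"
	"fmt"
	"time"
)

// stopCtx mirrors the hunk above: honor the caller's grace period when one is
// given, otherwise fall back to the client's default RPC timeout.
func stopCtx(graceSeconds int64, defaultTimeout time.Duration) (context.Context, context.CancelFunc) {
	d := time.Duration(graceSeconds) * time.Second
	if graceSeconds == 0 {
		d = defaultTimeout
	}
	return context.WithTimeout(context.Background(), d)
}

func main() {
	ctx, cancel := stopCtx(30, 2*time.Second)
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("RPC must finish by:", deadline)
}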
12 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD generated vendored
@@ -11,20 +11,17 @@ load(
go_library(
    name = "go_default_library",
    srcs = [
        "api.go",
        "doc.go",
        "healthcheck.go",
        "http.go",
        "listener.go",
        "worker.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/renstrom/dedent",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/client-go/tools/cache",
        "//vendor:k8s.io/client-go/pkg/api",
        "//vendor:k8s.io/client-go/pkg/api/v1",
        "//vendor:k8s.io/client-go/tools/record",
    ],
)
@@ -34,6 +31,7 @@ go_test(
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor:github.com/davecgh/go-spew/spew",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
65 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/api.go generated vendored
@@ -1,65 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
)

// All public API Methods for this package

// UpdateEndpoints Update the set of local endpoints for a service
func UpdateEndpoints(serviceName types.NamespacedName, endpointUids sets.String) {
	req := &proxyMutationRequest{
		serviceName:  serviceName,
		endpointUids: &endpointUids,
	}
	healthchecker.mutationRequestChannel <- req
}

func updateServiceListener(serviceName types.NamespacedName, listenPort int, add bool) bool {
	responseChannel := make(chan bool)
	req := &proxyListenerRequest{
		serviceName:     serviceName,
		listenPort:      uint16(listenPort),
		add:             add,
		responseChannel: responseChannel,
	}
	healthchecker.listenerRequestChannel <- req
	return <-responseChannel
}

// AddServiceListener Request addition of a listener for a service's health check
func AddServiceListener(serviceName types.NamespacedName, listenPort int) bool {
	return updateServiceListener(serviceName, listenPort, true)
}

// DeleteServiceListener Request deletion of a listener for a service's health check
func DeleteServiceListener(serviceName types.NamespacedName, listenPort int) bool {
	return updateServiceListener(serviceName, listenPort, false)
}

// Run Start the healthchecker main loop
func Run() {
	healthchecker = proxyHealthCheckFactory()
	// Wrap with a wait.Forever to handle panics.
	go wait.Forever(func() {
		healthchecker.handlerLoop()
	}, 0)
}
2 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go generated vendored
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies
// Package healthcheck provides tools for serving kube-proxy healthchecks.
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"
272 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go generated vendored
@@ -20,108 +20,216 @@ import (
	"fmt"
	"net"
	"net/http"
	"strings"
	"sync"

	"github.com/golang/glog"
	"github.com/renstrom/dedent"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/pkg/api"
	clientv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/record"
)

// proxyMutationRequest: Message to request addition/deletion of endpoints for a service
type proxyMutationRequest struct {
	serviceName  types.NamespacedName
	endpointUids *sets.String
// Server serves HTTP endpoints for each service name, with results
// based on the endpoints. If there are 0 endpoints for a service, it returns a
// 503 "Service Unavailable" error (telling LBs not to use this node). If there
// are 1 or more endpoints, it returns a 200 "OK".
type Server interface {
	// Make the new set of services be active. Services that were open before
	// will be closed. Services that are new will be opened. Service that
	// existed and are in the new set will be left alone. The value of the map
	// is the healthcheck-port to listen on.
	SyncServices(newServices map[types.NamespacedName]uint16) error
	// Make the new set of endpoints be active. Endpoints for services that do
	// not exist will be dropped. The value of the map is the number of
	// endpoints the service has on this node.
	SyncEndpoints(newEndpoints map[types.NamespacedName]int) error
}

// proxyListenerRequest: Message to request addition/deletion of a service responder on a listening port
type proxyListenerRequest struct {
	serviceName     types.NamespacedName
	listenPort      uint16
	add             bool
	responseChannel chan bool
// Listener allows for testing of Server. If the Listener argument
// to NewServer() is nil, the real net.Listen function will be used.
type Listener interface {
	// Listen is very much like net.Listen, except the first arg (network) is
	// fixed to be "tcp".
	Listen(addr string) (net.Listener, error)
}

// serviceEndpointsList: A list of endpoints for a service
type serviceEndpointsList struct {
	serviceName types.NamespacedName
	endpoints   *sets.String
// HTTPServerFactory allows for testing of Server. If the
// HTTPServerFactory argument to NewServer() is nil, the real
// http.Server type will be used.
type HTTPServerFactory interface {
	// New creates an instance of a type satisfying HTTPServer. This is
	// designed to include http.Server.
	New(addr string, handler http.Handler) HTTPServer
}

// serviceResponder: Contains net/http datastructures necessary for responding to each Service's health check on its aux nodePort
type serviceResponder struct {
	serviceName types.NamespacedName
	listenPort  uint16
	listener    *net.Listener
	server      *http.Server
// HTTPServer allows for testing of Server.
type HTTPServer interface {
	// Server is designed so that http.Server satifies this interface,
	Serve(listener net.Listener) error
}

// proxyHC: Handler structure for health check, endpoint add/delete and service listener add/delete requests
type proxyHC struct {
	serviceEndpointsMap    cache.ThreadSafeStore
	serviceResponderMap    map[types.NamespacedName]serviceResponder
	mutationRequestChannel chan *proxyMutationRequest
	listenerRequestChannel chan *proxyListenerRequest
}

// handleHealthCheckRequest - received a health check request - lookup and respond to HC.
func (h *proxyHC) handleHealthCheckRequest(rw http.ResponseWriter, serviceName string) {
	s, ok := h.serviceEndpointsMap.Get(serviceName)
	if !ok {
		glog.V(4).Infof("Service %s not found or has no local endpoints", serviceName)
		sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "No Service Endpoints Found")
		return
// NewServer allocates a new healthcheck server manager. If either
// of the injected arguments are nil, defaults will be used.
func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server {
	if listener == nil {
		listener = stdNetListener{}
	}
	numEndpoints := len(*s.(*serviceEndpointsList).endpoints)
	if numEndpoints > 0 {
		sendHealthCheckResponse(rw, http.StatusOK, fmt.Sprintf("%d Service Endpoints found", numEndpoints))
		return
	if httpServerFactory == nil {
		httpServerFactory = stdHTTPServerFactory{}
	}
	return &server{
		hostname:    hostname,
		recorder:    recorder,
		listener:    listener,
		httpFactory: httpServerFactory,
		services:    map[types.NamespacedName]*hcInstance{},
	}
	sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "0 local Endpoints are alive")
}

// handleMutationRequest - receive requests to mutate the table entry for a service
func (h *proxyHC) handleMutationRequest(req *proxyMutationRequest) {
	numEndpoints := len(*req.endpointUids)
	glog.V(4).Infof("LB service health check mutation request Service: %s - %d Endpoints %v",
		req.serviceName, numEndpoints, (*req.endpointUids).List())
	if numEndpoints == 0 {
		if _, ok := h.serviceEndpointsMap.Get(req.serviceName.String()); ok {
			glog.V(4).Infof("Deleting endpoints map for service %s, all local endpoints gone", req.serviceName.String())
			h.serviceEndpointsMap.Delete(req.serviceName.String())
		}
		return
// Implement Listener in terms of net.Listen.
type stdNetListener struct{}

func (stdNetListener) Listen(addr string) (net.Listener, error) {
	return net.Listen("tcp", addr)
}

var _ Listener = stdNetListener{}

// Implement HTTPServerFactory in terms of http.Server.
type stdHTTPServerFactory struct{}

func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
	return &http.Server{
		Addr:    addr,
		Handler: handler,
	}
	var entry *serviceEndpointsList
	e, exists := h.serviceEndpointsMap.Get(req.serviceName.String())
	if exists {
		entry = e.(*serviceEndpointsList)
		if entry.endpoints.Equal(*req.endpointUids) {
			return
		}
		// Compute differences just for printing logs about additions and removals
		deletedEndpoints := entry.endpoints.Difference(*req.endpointUids)
		newEndpoints := req.endpointUids.Difference(*entry.endpoints)
		for _, e := range newEndpoints.List() {
			glog.V(4).Infof("Adding local endpoint %s to LB health check for service %s",
				e, req.serviceName.String())
		}
		for _, d := range deletedEndpoints.List() {
			glog.V(4).Infof("Deleted endpoint %s from service %s LB health check (%d endpoints left)",
				d, req.serviceName.String(), len(*entry.endpoints))
		}
	}

var _ HTTPServerFactory = stdHTTPServerFactory{}

type server struct {
	hostname    string
	recorder    record.EventRecorder // can be nil
	listener    Listener
	httpFactory HTTPServerFactory

	lock     sync.Mutex
	services map[types.NamespacedName]*hcInstance
}

func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) error {
	hcs.lock.Lock()
	defer hcs.lock.Unlock()

	// Remove any that are not needed any more.
	for nsn, svc := range hcs.services {
		if port, found := newServices[nsn]; !found || port != svc.port {
			glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
			if err := svc.listener.Close(); err != nil {
				glog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
			}
			delete(hcs.services, nsn)
		}
	}
	entry = &serviceEndpointsList{serviceName: req.serviceName, endpoints: req.endpointUids}
	h.serviceEndpointsMap.Add(req.serviceName.String(), entry)

	// Add any that are needed.
	for nsn, port := range newServices {
		if hcs.services[nsn] != nil {
			glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
			continue
		}

		glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
		svc := &hcInstance{port: port}
		addr := fmt.Sprintf(":%d", port)
		svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs})
		var err error
		svc.listener, err = hcs.listener.Listen(addr)
		if err != nil {
			msg := fmt.Sprintf("node %s failed to start healthcheck %q on port %d: %v", hcs.hostname, nsn.String(), port, err)

			if hcs.recorder != nil {
				hcs.recorder.Eventf(
					&clientv1.ObjectReference{
						Kind:      "Service",
						Namespace: nsn.Namespace,
						Name:      nsn.Name,
						UID:       types.UID(nsn.String()),
					}, api.EventTypeWarning, "FailedToStartHealthcheck", msg)
			}
			glog.Error(msg)
			continue
		}
		hcs.services[nsn] = svc

		go func(nsn types.NamespacedName, svc *hcInstance) {
			// Serve() will exit when the listener is closed.
			glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
			if err := svc.server.Serve(svc.listener); err != nil {
				glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
				return
			}
			glog.V(3).Infof("Healthcheck %q closed", nsn.String())
		}(nsn, svc)
	}
	return nil
}

// proxyHealthCheckRequest - Factory method to instantiate the health check handler
func proxyHealthCheckFactory() *proxyHC {
	glog.V(2).Infof("Initializing kube-proxy health checker")
	phc := &proxyHC{
		serviceEndpointsMap:    cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}),
		serviceResponderMap:    make(map[types.NamespacedName]serviceResponder),
		mutationRequestChannel: make(chan *proxyMutationRequest, 1024),
		listenerRequestChannel: make(chan *proxyListenerRequest, 1024),
	}
	return phc
type hcInstance struct {
	port      uint16
	listener  net.Listener
	server    HTTPServer
	endpoints int // number of local endpoints for a service
}

type hcHandler struct {
	name types.NamespacedName
	hcs  *server
}

var _ http.Handler = hcHandler{}

func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	h.hcs.lock.Lock()
	count := h.hcs.services[h.name].endpoints
	h.hcs.lock.Unlock()

	resp.Header().Set("Content-Type", "application/json")
	if count == 0 {
		resp.WriteHeader(http.StatusServiceUnavailable)
	} else {
		resp.WriteHeader(http.StatusOK)
	}
	fmt.Fprintf(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
		{
			"service": {
				"namespace": %q,
				"name": %q
			},
			"localEndpoints": %d
		}
		`, h.name.Namespace, h.name.Name, count)), "\n"))
}

func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
	hcs.lock.Lock()
	defer hcs.lock.Unlock()

	for nsn, count := range newEndpoints {
		if hcs.services[nsn] == nil {
			glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
			continue
		}
		glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
		hcs.services[nsn].endpoints = count
	}
	for nsn, hci := range hcs.services {
		if _, found := newEndpoints[nsn]; !found {
			hci.endpoints = 0
		}
	}
	return nil
}
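Aside: taken together the replacement API is small. A hedged usage sketch as the hunk defines it; the hostname, port, and service name are invented for illustration, and passing nil for the listener and factory selects the real net.Listen and http.Server paths.

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/proxy/healthcheck"
)

func main() {
	// nil recorder is allowed per the struct comment ("can be nil").
	hcs := healthcheck.NewServer("node-1", nil, nil, nil)

	svc := types.NamespacedName{Namespace: "default", Name: "web"}

	// Open a healthcheck listener on the service's healthCheckNodePort.
	_ = hcs.SyncServices(map[types.NamespacedName]uint16{svc: 30123})

	// Report two local endpoints; GET :30123 now answers 200 with a JSON body.
	_ = hcs.SyncEndpoints(map[types.NamespacedName]int{svc: 2})

	time.Sleep(time.Second) // give the serving goroutine a moment in this toy example
}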
455 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go generated vendored
@@ -17,142 +17,341 @@ limitations under the License.
package healthcheck

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"encoding/json"
	"net"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
)

type TestCaseData struct {
	nodePorts    int
	numEndpoints int
	nodePortList []int
	svcNames     []types.NamespacedName
type fakeListener struct {
	openPorts sets.String
}

const (
	startPort = 20000
	endPort   = 40000
)

var (
	choices = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
)

func generateRandomString(n int) string {

	b := make([]byte, n)
	l := len(choices)
	for i := range b {
		b[i] = choices[rand.Intn(l)]
	}
	return string(b)
}

func chooseServiceName(tc int, hint int) types.NamespacedName {
	var svc types.NamespacedName
	svc.Namespace = fmt.Sprintf("ns_%d", tc)
	svc.Name = fmt.Sprintf("name_%d", hint)
	return svc
}

func generateEndpointSet(max int) sets.String {
	s := sets.NewString()
	for i := 0; i < max; i++ {
		s.Insert(fmt.Sprintf("%d%s", i, generateRandomString(8)))
	}
	return s
}

func verifyHealthChecks(tc *TestCaseData, t *testing.T) bool {
	var success = true
	time.Sleep(100 * time.Millisecond)
	for i := 0; i < tc.nodePorts; i++ {
		t.Logf("Validating HealthCheck works for svc %s nodePort %d\n", tc.svcNames[i], tc.nodePortList[i])
		res, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", tc.nodePortList[i]))
		if err != nil {
			t.Logf("ERROR: Failed to connect to listening port")
			success = false
			continue
		}
		robots, err := ioutil.ReadAll(res.Body)
		if res.StatusCode == http.StatusServiceUnavailable {
			t.Logf("ERROR: HealthCheck returned %s: %s", res.Status, string(robots))
			success = false
			continue
		}
		res.Body.Close()
		if err != nil {
			t.Logf("Error: reading body of response (%s)", err)
			success = false
			continue
		}
	}
	if success {
		t.Logf("Success: All nodePorts found active")
	}
	return success
}

func TestHealthChecker(t *testing.T) {
	testcases := []TestCaseData{
		{
			nodePorts:    1,
			numEndpoints: 2,
		},
		{
			nodePorts:    10,
			numEndpoints: 6,
		},
		{
			nodePorts:    100,
			numEndpoints: 1,
		},
	}

	Run()

	ports := startPort
	for n, tc := range testcases {
		tc.nodePortList = make([]int, tc.nodePorts)
		tc.svcNames = make([]types.NamespacedName, tc.nodePorts)
		for i := 0; i < tc.nodePorts; i++ {
			tc.svcNames[i] = chooseServiceName(n, i)
			t.Logf("Updating endpoints map for %s %d", tc.svcNames[i], tc.numEndpoints)
			for {
				UpdateEndpoints(tc.svcNames[i], generateEndpointSet(tc.numEndpoints))
				tc.nodePortList[i] = ports
				ports++
				if AddServiceListener(tc.svcNames[i], tc.nodePortList[i]) {
					break
				}
				DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i])
				// Keep searching for a port that works
				t.Logf("Failed to bind/listen on port %d...trying next port", ports-1)
				if ports > endPort {
					t.Errorf("Exhausted range of ports available for tests")
					return
				}
			}
		}
		t.Logf("Validating if all nodePorts for tc %d work", n)
		if !verifyHealthChecks(&tc, t) {
			t.Errorf("Healthcheck validation failed")
		}

		for i := 0; i < tc.nodePorts; i++ {
			DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i])
			UpdateEndpoints(tc.svcNames[i], sets.NewString())
		}

		// Ensure that all listeners have been shutdown
		if verifyHealthChecks(&tc, t) {
			t.Errorf("Healthcheck validation failed")
		}
func newFakeListener() *fakeListener {
	return &fakeListener{
		openPorts: sets.String{},
	}
}

func (fake *fakeListener) hasPort(addr string) bool {
	return fake.openPorts.Has(addr)
}

func (fake *fakeListener) Listen(addr string) (net.Listener, error) {
	fake.openPorts.Insert(addr)
	return &fakeNetListener{
		parent: fake,
		addr:   addr,
	}, nil
}

type fakeNetListener struct {
	parent *fakeListener
	addr   string
}

func (fake *fakeNetListener) Accept() (net.Conn, error) {
	// Not implemented
	return nil, nil
}

func (fake *fakeNetListener) Close() error {
	fake.parent.openPorts.Delete(fake.addr)
	return nil
}

func (fake *fakeNetListener) Addr() net.Addr {
	// Not implemented
	return nil
}

type fakeHTTPServerFactory struct{}

func newFakeHTTPServerFactory() *fakeHTTPServerFactory {
	return &fakeHTTPServerFactory{}
}

func (fake *fakeHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
	return &fakeHTTPServer{
		addr:    addr,
		handler: handler,
	}
}

type fakeHTTPServer struct {
	addr    string
	handler http.Handler
}

func (fake *fakeHTTPServer) Serve(listener net.Listener) error {
	return nil // Cause the goroutine to return
}

func mknsn(ns, name string) types.NamespacedName {
	return types.NamespacedName{
		Namespace: ns,
		Name:      name,
	}
}

type hcPayload struct {
	Service struct {
		Namespace string
		Name      string
	}
	LocalEndpoints int
}

func TestServer(t *testing.T) {
	listener := newFakeListener()
	httpFactory := newFakeHTTPServerFactory()

	hcsi := NewServer("hostname", nil, listener, httpFactory)
	hcs := hcsi.(*server)
	if len(hcs.services) != 0 {
		t.Errorf("expected 0 services, got %d", len(hcs.services))
	}

	// sync nothing
	hcs.SyncServices(nil)
	if len(hcs.services) != 0 {
		t.Errorf("expected 0 services, got %d", len(hcs.services))
	}
	hcs.SyncEndpoints(nil)
	if len(hcs.services) != 0 {
		t.Errorf("expected 0 services, got %d", len(hcs.services))
	}

	// sync unknown endpoints, should be dropped
	hcs.SyncEndpoints(map[types.NamespacedName]int{mknsn("a", "b"): 93})
	if len(hcs.services) != 0 {
		t.Errorf("expected 0 services, got %d", len(hcs.services))
	}

	// sync a real service
	nsn := mknsn("a", "b")
	hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 9376})
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	if len(listener.openPorts) != 1 {
		t.Errorf("expected 1 open port, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
	}
	if !listener.hasPort(":9376") {
		t.Errorf("expected port :9376 to be open\n%s", spew.Sdump(listener.openPorts))
	}
	// test the handler
	testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)

	// sync an endpoint
	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 18 {
		t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	// test the handler
	testHandler(hcs, nsn, http.StatusOK, 18, t)

	// sync zero endpoints
	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 0})
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	// test the handler
	testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)

	// put the endpoint back
	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 11})
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 11 {
		t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	// sync nil endpoints
	hcs.SyncEndpoints(nil)
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	// test the handler
	testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)

	// put the endpoint back
	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
	if len(hcs.services) != 1 {
		t.Errorf("expected 1 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn].endpoints != 18 {
		t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
	}
	// delete the service
	hcs.SyncServices(nil)
	if len(hcs.services) != 0 {
		t.Errorf("expected 0 services, got %d", len(hcs.services))
	}

	// sync multiple services
	nsn1 := mknsn("a", "b")
	nsn2 := mknsn("c", "d")
	nsn3 := mknsn("e", "f")
	nsn4 := mknsn("g", "h")
	hcs.SyncServices(map[types.NamespacedName]uint16{
		nsn1: 9376,
		nsn2: 12909,
		nsn3: 11113,
	})
	if len(hcs.services) != 3 {
		t.Errorf("expected 3 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn1].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn1].endpoints)
	}
	if hcs.services[nsn2].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
	}
	if hcs.services[nsn3].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
	}
	if len(listener.openPorts) != 3 {
		t.Errorf("expected 3 open ports, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
	}
	// test the handlers
	testHandler(hcs, nsn1, http.StatusServiceUnavailable, 0, t)
	testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
	testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)

	// sync endpoints
	hcs.SyncEndpoints(map[types.NamespacedName]int{
		nsn1: 9,
		nsn2: 3,
		nsn3: 7,
	})
	if len(hcs.services) != 3 {
		t.Errorf("expected 3 services, got %d", len(hcs.services))
	}
	if hcs.services[nsn1].endpoints != 9 {
		t.Errorf("expected 9 endpoints, got %d", hcs.services[nsn1].endpoints)
	}
	if hcs.services[nsn2].endpoints != 3 {
		t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
	}
	if hcs.services[nsn3].endpoints != 7 {
		t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
	}
	// test the handlers
	testHandler(hcs, nsn1, http.StatusOK, 9, t)
	testHandler(hcs, nsn2, http.StatusOK, 3, t)
	testHandler(hcs, nsn3, http.StatusOK, 7, t)

	// sync new services
	hcs.SyncServices(map[types.NamespacedName]uint16{
		//nsn1: 9376, // remove it
		nsn2: 12909, // leave it
		nsn3: 11114, // change it
		nsn4: 11878, // add it
	})
	if len(hcs.services) != 3 {
		t.Errorf("expected 3 service, got %d", len(hcs.services))
	}
	if hcs.services[nsn2].endpoints != 3 {
		t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
	}
	if hcs.services[nsn3].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
	}
	if hcs.services[nsn4].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn4].endpoints)
	}
	// test the handlers
	testHandler(hcs, nsn2, http.StatusOK, 3, t)
	testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
	testHandler(hcs, nsn4, http.StatusServiceUnavailable, 0, t)

	// sync endpoints
	hcs.SyncEndpoints(map[types.NamespacedName]int{
		nsn1: 9,
		nsn2: 3,
		nsn3: 7,
		nsn4: 6,
	})
	if len(hcs.services) != 3 {
		t.Errorf("expected 3 services, got %d", len(hcs.services))
	}
	if hcs.services[nsn2].endpoints != 3 {
		t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
	}
	if hcs.services[nsn3].endpoints != 7 {
		t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
	}
	if hcs.services[nsn4].endpoints != 6 {
		t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
	}
	// test the handlers
	testHandler(hcs, nsn2, http.StatusOK, 3, t)
	testHandler(hcs, nsn3, http.StatusOK, 7, t)
	testHandler(hcs, nsn4, http.StatusOK, 6, t)

	// sync endpoints, missing nsn2
	hcs.SyncEndpoints(map[types.NamespacedName]int{
		nsn3: 7,
		nsn4: 6,
	})
	if len(hcs.services) != 3 {
		t.Errorf("expected 3 services, got %d", len(hcs.services))
	}
	if hcs.services[nsn2].endpoints != 0 {
		t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
	}
	if hcs.services[nsn3].endpoints != 7 {
		t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
	}
	if hcs.services[nsn4].endpoints != 6 {
		t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
	}
	// test the handlers
	testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
	testHandler(hcs, nsn3, http.StatusOK, 7, t)
	testHandler(hcs, nsn4, http.StatusOK, 6, t)
}

func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints int, t *testing.T) {
	handler := hcs.services[nsn].server.(*fakeHTTPServer).handler
	req, err := http.NewRequest("GET", "/healthz", nil)
	if err != nil {
		t.Fatal(err)
	}
	resp := httptest.NewRecorder()

	handler.ServeHTTP(resp, req)

	if resp.Code != status {
		t.Errorf("expected status code %v, got %v", status, resp.Code)
	}
	var payload hcPayload
	if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
		t.Fatal(err)
	}
	if payload.Service.Name != nsn.Name || payload.Service.Namespace != nsn.Namespace {
		t.Errorf("expected payload name %q, got %v", nsn.String(), payload.Service)
	}
	if payload.LocalEndpoints != endpoints {
		t.Errorf("expected %d endpoints, got %d", endpoints, payload.LocalEndpoints)
	}
}
46 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/http.go generated vendored
@@ -1,46 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
	"fmt"
	"net/http"

	"github.com/golang/glog"
)

// A healthCheckHandler serves http requests on /healthz on the service health check node port,
// and responds to every request with either:
// 200 OK and the count of endpoints for the given service that are local to this node.
// or
// 503 Service Unavailable If the count is zero or the service does not exist
type healthCheckHandler struct {
	svcNsName string
}

// HTTP Utility function to send the required statusCode and error text to a http.ResponseWriter object
func sendHealthCheckResponse(rw http.ResponseWriter, statusCode int, error string) {
	rw.Header().Set("Content-Type", "text/plain")
	rw.WriteHeader(statusCode)
	fmt.Fprint(rw, error)
}

// ServeHTTP: Interface callback method for net.Listener Handlers
func (h healthCheckHandler) ServeHTTP(response http.ResponseWriter, req *http.Request) {
	glog.V(4).Infof("Received HC Request Service %s from Cloud Load Balancer", h.svcNsName)
	healthchecker.handleHealthCheckRequest(response, h.svcNsName)
}
77 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/listener.go generated vendored
@@ -1,77 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

// Create/Delete dynamic listeners on the required nodePorts

import (
	"fmt"
	"net"
	"net/http"

	"github.com/golang/glog"
)

// handleServiceListenerRequest: receive requests to add/remove service health check listening ports
func (h *proxyHC) handleServiceListenerRequest(req *proxyListenerRequest) bool {
	sr, serviceFound := h.serviceResponderMap[req.serviceName]
	if !req.add {
		if !serviceFound {
			return false
		}
		glog.Infof("Deleting HealthCheckListenPort for service %s port %d",
			req.serviceName, req.listenPort)
		delete(h.serviceResponderMap, req.serviceName)
		(*sr.listener).Close()
		return true
	} else if serviceFound {
		if req.listenPort == sr.listenPort {
			// Addition requested but responder for service already exists and port is unchanged
			return true
		}
		// Addition requested but responder for service already exists but the listen port has changed
		glog.Infof("HealthCheckListenPort for service %s changed from %d to %d - closing old listening port",
			req.serviceName, sr.listenPort, req.listenPort)
		delete(h.serviceResponderMap, req.serviceName)
		(*sr.listener).Close()
	}
	// Create a service responder object and start listening and serving on the provided port
	glog.V(2).Infof("Adding health check listener for service %s on nodePort %d", req.serviceName, req.listenPort)
	server := http.Server{
		Addr:    fmt.Sprintf(":%d", req.listenPort),
		Handler: healthCheckHandler{svcNsName: req.serviceName.String()},
	}
	listener, err := net.Listen("tcp", server.Addr)
	if err != nil {
		glog.Warningf("FAILED to listen on address %s (%s)\n", server.Addr, err)
		return false
	}
	h.serviceResponderMap[req.serviceName] = serviceResponder{serviceName: req.serviceName,
		listenPort: req.listenPort,
		listener:   &listener,
		server:     &server}
	go func() {
		// Anonymous goroutine to block on Serve for this listen port - Serve will exit when the listener is closed
		glog.V(3).Infof("Goroutine blocking on serving health checks for %s on port %d", req.serviceName, req.listenPort)
		if err := server.Serve(listener); err != nil {
			glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed with error %s\n", req.listenPort, req.serviceName, err)
			return
		}
		glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed\n", req.listenPort, req.serviceName)
	}()
	return true
}
53 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/worker.go generated vendored
@@ -1,53 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"

import (
	"time"

	"github.com/golang/glog"
)

var healthchecker *proxyHC

// handlerLoop Serializes all requests to prevent concurrent access to the maps
func (h *proxyHC) handlerLoop() {
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case req := <-h.mutationRequestChannel:
			h.handleMutationRequest(req)
		case req := <-h.listenerRequestChannel:
			req.responseChannel <- h.handleServiceListenerRequest(req)
		case <-ticker.C:
			go h.sync()
		}
	}
}

func (h *proxyHC) sync() {
	glog.V(4).Infof("%d Health Check Listeners", len(h.serviceResponderMap))
	glog.V(4).Infof("%d Services registered for health checking", len(h.serviceEndpointsMap.List()))
	for _, svc := range h.serviceEndpointsMap.ListKeys() {
		if e, ok := h.serviceEndpointsMap.Get(svc); ok {
			endpointList := e.(*serviceEndpointsList)
			glog.V(4).Infof("Service %s has %d local endpoints", svc, endpointList.endpoints.Len())
		}
	}
}
1 vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD generated vendored
@@ -50,7 +50,6 @@ go_test(
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/intstr",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
)
130 vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go generated vendored
@@ -213,7 +213,7 @@ type Proxier struct {
	nodeIP        net.IP
	portMapper    portOpener
	recorder      record.EventRecorder
	healthChecker healthChecker
	healthChecker healthcheck.Server
}

type localPort struct {
@@ -245,17 +245,6 @@ func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return openLocalPort(lp)
}

type healthChecker interface {
	UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String)
}

// TODO: the healthcheck pkg should offer a type
type globalHealthChecker struct{}

func (globalHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) {
	healthcheck.UpdateEndpoints(serviceName, endpointUIDs)
}

// Proxier implements ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
@@ -309,8 +298,7 @@ func NewProxier(ipt utiliptables.Interface,
		glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
	}

	healthChecker := globalHealthChecker{}
	go healthcheck.Run()
	healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps

	var throttle flowcontrol.RateLimiter
	// Defaulting back to not limit sync rate when minSyncPeriod is 0.
@@ -444,18 +432,12 @@ func (proxier *Proxier) SyncLoop() {
	}
}

type healthCheckPort struct {
	namespace types.NamespacedName
	nodeport  int
}

// Accepts a list of Services and the existing service map. Returns the new
// service map, a list of healthcheck ports to add to or remove from the health
// checking listener service, and a set of stale UDP services.
func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, []healthCheckPort, []healthCheckPort, sets.String) {
// service map, a map of healthcheck ports, and a set of stale UDP
// services.
func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, map[types.NamespacedName]uint16, sets.String) {
	newServiceMap := make(proxyServiceMap)
	healthCheckAdd := make([]healthCheckPort, 0)
	healthCheckDel := make([]healthCheckPort, 0)
	hcPorts := make(map[types.NamespacedName]uint16)

	for i := range allServices {
		service := &allServices[i]
@@ -492,12 +474,8 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
			glog.V(1).Infof("Updating existing service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol)
		}

		if !exists || !equal {
			if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 {
				healthCheckAdd = append(healthCheckAdd, healthCheckPort{serviceName.NamespacedName, info.healthCheckNodePort})
			} else {
				healthCheckDel = append(healthCheckDel, healthCheckPort{serviceName.NamespacedName, 0})
			}
		if info.onlyNodeLocalEndpoints {
			hcPorts[svcName] = uint16(info.healthCheckNodePort)
		}

		newServiceMap[serviceName] = info
@@ -505,6 +483,13 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
		}
	}

	for nsn, port := range hcPorts {
		if port == 0 {
			glog.Errorf("Service %q has no healthcheck nodeport", nsn)
			delete(hcPorts, nsn)
		}
	}

	staleUDPServices := sets.NewString()
	// Remove serviceports missing from the update.
	for name, info := range oldServiceMap {
@@ -513,13 +498,10 @@ func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (
			if info.protocol == api.ProtocolUDP {
				staleUDPServices.Insert(info.clusterIP.String())
			}
			if info.onlyNodeLocalEndpoints && info.healthCheckNodePort > 0 {
				healthCheckDel = append(healthCheckDel, healthCheckPort{name.NamespacedName, info.healthCheckNodePort})
			}
		}
	}

	return newServiceMap, healthCheckAdd, healthCheckDel, staleUDPServices
	return newServiceMap, hcPorts, staleUDPServices
}

// OnServiceUpdate tracks the active set of service proxies.
@@ -533,19 +515,11 @@ func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) {
	defer proxier.mu.Unlock()
	proxier.haveReceivedServiceUpdate = true

	newServiceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap)
	for _, hc := range hcAdd {
		glog.V(4).Infof("Adding health check for %+v, port %v", hc.namespace, hc.nodeport)
		// Turn on healthcheck responder to listen on the health check nodePort
		// FIXME: handle failures from adding the service
		healthcheck.AddServiceListener(hc.namespace, hc.nodeport)
	}
	for _, hc := range hcDel {
		// Remove ServiceListener health check nodePorts from the health checker
		// TODO - Stats
		glog.V(4).Infof("Deleting health check for %+v, port %v", hc.namespace, hc.nodeport)
		// FIXME: handle failures from deleting the service
		healthcheck.DeleteServiceListener(hc.namespace, hc.nodeport)
	newServiceMap, hcPorts, staleUDPServices := buildServiceMap(allServices, proxier.serviceMap)

	// update healthcheck ports
	if err := proxier.healthChecker.SyncServices(hcPorts); err != nil {
		glog.Errorf("Error syncing healtcheck ports: %v", err)
	}

	if len(newServiceMap) != len(proxier.serviceMap) || !reflect.DeepEqual(newServiceMap, proxier.serviceMap) {
@@ -568,7 +542,13 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
	proxier.allEndpoints = allEndpoints

	// TODO: once service has made this same transform, move this into proxier.syncProxyRules()
	newMap, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname, proxier.healthChecker)
	newMap, hcEndpoints, staleConnections := updateEndpoints(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname)

	// update healthcheck endpoints
	if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil {
		glog.Errorf("Error syncing healthcheck endoints: %v", err)
	}

	if len(newMap) != len(proxier.endpointsMap) || !reflect.DeepEqual(newMap, proxier.endpointsMap) {
		proxier.endpointsMap = newMap
		proxier.syncProxyRules()
@@ -580,11 +560,11 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
}

// Convert a slice of api.Endpoints objects into a map of service-port -> endpoints.
func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string,
	healthChecker healthChecker) (newMap proxyEndpointMap, staleSet map[endpointServicePair]bool) {
func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, hostname string) (newMap proxyEndpointMap, hcEndpoints map[types.NamespacedName]int, staleSet map[endpointServicePair]bool) {

	// return values
	newMap = make(proxyEndpointMap)
	hcEndpoints = make(map[types.NamespacedName]int)
	staleSet = make(map[endpointServicePair]bool)

	// Update endpoints for services.
@ -610,19 +590,30 @@ func updateEndpoints(allEndpoints []api.Endpoints, curMap proxyEndpointMap, host
|
|||
}
|
||||
}
|
||||
|
||||
// Update service health check
|
||||
allSvcPorts := make(map[proxy.ServicePortName]bool)
|
||||
for svcPort := range curMap {
|
||||
allSvcPorts[svcPort] = true
|
||||
}
|
||||
for svcPort := range newMap {
|
||||
allSvcPorts[svcPort] = true
|
||||
}
|
||||
for svcPort := range allSvcPorts {
|
||||
updateHealthCheckEntries(svcPort.NamespacedName, newMap[svcPort], healthChecker)
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
|
||||
return
|
||||
}
|
||||
|
||||
return newMap, staleSet
|
||||
// accumulate local IPs per service, ignoring ports
|
||||
localIPs := map[types.NamespacedName]sets.String{}
|
||||
for svcPort := range newMap {
|
||||
for _, ep := range newMap[svcPort] {
|
||||
if ep.isLocal {
|
||||
nsn := svcPort.NamespacedName
|
||||
if localIPs[nsn] == nil {
|
||||
localIPs[nsn] = sets.NewString()
|
||||
}
|
||||
ip := strings.Split(ep.endpoint, ":")[0] // just the IP part
|
||||
localIPs[nsn].Insert(ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
// produce a count per service
|
||||
for nsn, ips := range localIPs {
|
||||
hcEndpoints[nsn] = len(ips)
|
||||
}
|
||||
|
||||
return newMap, hcEndpoints, staleSet
|
||||
}
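
The accumulation above is the heart of the new health-check plumbing: one entry per service, counting distinct local endpoint IPs while ignoring ports. A standalone sketch of just that step, reusing the package's own types (illustrative, not the vendored source):

// countLocalEndpoints sketches the accumulation step in updateEndpoints:
// one health-check entry per service, counting distinct local IPs no
// matter how many ports each IP serves.
func countLocalEndpoints(newMap proxyEndpointMap) map[types.NamespacedName]int {
    localIPs := map[types.NamespacedName]sets.String{}
    for svcPort, eps := range newMap {
        for _, ep := range eps {
            if !ep.isLocal {
                continue
            }
            nsn := svcPort.NamespacedName
            if localIPs[nsn] == nil {
                localIPs[nsn] = sets.NewString()
            }
            // "1.1.1.2:11" -> "1.1.1.2"; ports are deliberately ignored.
            localIPs[nsn].Insert(strings.Split(ep.endpoint, ":")[0])
        }
    }
    counts := make(map[types.NamespacedName]int, len(localIPs))
    for nsn, ips := range localIPs {
        counts[nsn] = len(ips)
    }
    return counts
}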

// Gather information about all the endpoint state for a given api.Endpoints.

@@ -668,23 +659,6 @@ func accumulateEndpointsMap(endpoints *api.Endpoints, hostname string,
	}
}

// updateHealthCheckEntries - send the new set of local endpoints to the health checker
func updateHealthCheckEntries(name types.NamespacedName, endpoints []*endpointsInfo, healthChecker healthChecker) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
		return
	}

	// Use a set instead of a slice to provide deduplication
	epSet := sets.NewString()
	for _, portInfo := range endpoints {
		if portInfo.isLocal {
			// kube-proxy health check only needs local endpoints
			epSet.Insert(fmt.Sprintf("%s/%s", name.Namespace, name.Name))
		}
	}
	healthChecker.UpdateEndpoints(name, epSet)
}

// portProtoHash takes the ServicePortName and protocol for a service
// returns the associated 16 character hash. This is computed by hashing (sha256)
// then encoding to base32 and truncating to 16 chars. We do this because IPTables
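
The hunk ends mid-comment here. As a sketch of the scheme the comment describes (sha256 of the name plus protocol, base32 encoding, truncation to 16 characters; the exact vendored body is outside this hunk, so treat the signature as an assumption):

import (
    "crypto/sha256"
    "encoding/base32"
)

// portProtoHash sketch: derive a short, stable identifier for an
// iptables chain from a service-port string and its protocol.
func portProtoHash(servicePortName string, protocol string) string {
    hash := sha256.Sum256([]byte(servicePortName + protocol))
    encoded := base32.StdEncoding.EncodeToString(hash[:])
    return encoded[:16] // iptables chain names have a tight length budget
}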

284 vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go generated vendored

@@ -17,6 +17,7 @@ limitations under the License.
package iptables

import (
	"reflect"
	"strconv"
	"testing"

@@ -29,7 +30,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/service"
	"k8s.io/kubernetes/pkg/proxy"

@@ -355,9 +355,27 @@ func (f *fakePortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return nil, nil
}

type fakeHealthChecker struct{}
type fakeHealthChecker struct {
	services  map[types.NamespacedName]uint16
	endpoints map[types.NamespacedName]int
}

func (fakeHealthChecker) UpdateEndpoints(serviceName types.NamespacedName, endpointUIDs sets.String) {}
func newFakeHealthChecker() *fakeHealthChecker {
	return &fakeHealthChecker{
		services:  map[types.NamespacedName]uint16{},
		endpoints: map[types.NamespacedName]int{},
	}
}

func (fake *fakeHealthChecker) SyncServices(newServices map[types.NamespacedName]uint16) error {
	fake.services = newServices
	return nil
}

func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
	fake.endpoints = newEndpoints
	return nil
}
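
From the methods above one can read off the interface this fake satisfies; the declaration itself is not in these hunks, so the following is an inference rather than a quotation:

// Inferred shape of the proxier's healthChecker dependency. The real
// implementation is expected to live in pkg/proxy/healthcheck.
type healthChecker interface {
    SyncServices(newServices map[types.NamespacedName]uint16) error
    SyncEndpoints(newEndpoints map[types.NamespacedName]int) error
}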

const testHostname = "test-hostname"

@@ -374,7 +392,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
		hostname:      testHostname,
		portsMap:      make(map[localPort]closeable),
		portMapper:    &fakePortOpener{[]*localPort{}},
		healthChecker: fakeHealthChecker{},
		healthChecker: newFakeHealthChecker(),
	}
}
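
The tests below exercise buildServiceMap through its new three-value shape. Inferred from the call sites and the fake above (the declaration itself is outside these hunks, so this signature is an assumption):

// Inferred, not shown in this diff: the second result feeds
// healthChecker.SyncServices (health-check node ports keyed by service),
// the third lists stale UDP service IPs.
func buildServiceMap(allServices []api.Service, oldServiceMap proxyServiceMap) (proxyServiceMap, map[types.NamespacedName]uint16, sets.String)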

@@ -926,30 +944,18 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
		}),
	}

	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	if len(serviceMap) != 8 {
		t.Errorf("expected service map length 8, got %v", serviceMap)
	}

	// The only-local-loadbalancer ones get added
	if len(hcAdd) != 2 {
		t.Errorf("expected healthcheck add length 2, got %v", hcAdd)
	if len(hcPorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", hcPorts)
	} else {
		for _, hc := range hcAdd {
			if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" {
				t.Errorf("unexpected healthcheck listener added: %v", hc)
			}
		}
	}

	// All the rest get deleted
	if len(hcDel) != 6 {
		t.Errorf("expected healthcheck del length 6, got %v", hcDel)
	} else {
		for _, hc := range hcDel {
			if hc.namespace.Namespace == "somewhere" && hc.namespace.Name == "only-local-load-balancer" {
				t.Errorf("unexpected healthcheck listener deleted: %v", hc)
			}
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := hcPorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, hcPorts)
		}
	}

@@ -961,27 +967,13 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
	// Remove some stuff
	services = []api.Service{services[0]}
	services[0].Spec.Ports = []api.ServicePort{services[0].Spec.Ports[1]}
	serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(services, serviceMap)
	serviceMap, hcPorts, staleUDPServices = buildServiceMap(services, serviceMap)
	if len(serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", serviceMap)
	}

	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 1, got %v", hcAdd)
	}

	// The only OnlyLocal annotation was removed above, so we expect a delete now.
	// FIXME: Since the BetaAnnotationHealthCheckNodePort is the same for all
	// ServicePorts, we'll get one delete per ServicePort, even though they all
	// contain the same information
	if len(hcDel) != 2 {
		t.Errorf("expected healthcheck del length 2, got %v", hcDel)
	} else {
		for _, hc := range hcDel {
			if hc.namespace.Namespace != "somewhere" || hc.namespace.Name != "only-local-load-balancer" {
				t.Errorf("unexpected healthcheck listener deleted: %v", hc)
			}
		}
	if len(hcPorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
	}

	// All services but one were deleted. While you'd expect only the ClusterIPs

@@ -1008,17 +1000,14 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
	}

	// Headless service should be ignored
	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	if len(serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %d", len(serviceMap))
	}

	// No proxied services, so no healthchecks
	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 0, got %d", len(hcAdd))
	}
	if len(hcDel) != 0 {
		t.Errorf("expected healthcheck del length 0, got %d", len(hcDel))
	if len(hcPorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %d", len(hcPorts))
	}

	if len(staleUDPServices) != 0 {

@@ -1036,16 +1025,13 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
		}),
	}

	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	serviceMap, hcPorts, staleUDPServices := buildServiceMap(services, make(proxyServiceMap))
	if len(serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %v", serviceMap)
	}
	// No proxied services, so no healthchecks
	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
	}
	if len(hcDel) != 0 {
		t.Errorf("expected healthcheck del length 0, got %v", hcDel)
	if len(hcPorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
	}
	if len(staleUDPServices) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices)

@@ -1081,15 +1067,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
		}),
	}

	serviceMap, hcAdd, hcDel, staleUDPServices := buildServiceMap(first, make(proxyServiceMap))
	serviceMap, hcPorts, staleUDPServices := buildServiceMap(first, make(proxyServiceMap))
	if len(serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", serviceMap)
	}
	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
	}
	if len(hcDel) != 2 {
		t.Errorf("expected healthcheck del length 2, got %v", hcDel)
	if len(hcPorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
	}
	if len(staleUDPServices) != 0 {
		// Services only added, so nothing stale yet

@@ -1097,15 +1080,12 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
	}

	// Change service to load-balancer
	serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap)
	serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap)
	if len(serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", serviceMap)
	}
	if len(hcAdd) != 2 {
		t.Errorf("expected healthcheck add length 2, got %v", hcAdd)
	}
	if len(hcDel) != 0 {
		t.Errorf("expected healthcheck add length 2, got %v", hcDel)
	if len(hcPorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
	}
	if len(staleUDPServices) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List())

@@ -1113,30 +1093,24 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {

	// No change; make sure the service map stays the same and there are
	// no health-check changes
	serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(second, serviceMap)
	serviceMap, hcPorts, staleUDPServices = buildServiceMap(second, serviceMap)
	if len(serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", serviceMap)
	}
	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
	}
	if len(hcDel) != 0 {
		t.Errorf("expected healthcheck add length 2, got %v", hcDel)
	if len(hcPorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", hcPorts)
	}
	if len(staleUDPServices) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", staleUDPServices.List())
	}

	// And back to ClusterIP
	serviceMap, hcAdd, hcDel, staleUDPServices = buildServiceMap(first, serviceMap)
	serviceMap, hcPorts, staleUDPServices = buildServiceMap(first, serviceMap)
	if len(serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", serviceMap)
	}
	if len(hcAdd) != 0 {
		t.Errorf("expected healthcheck add length 0, got %v", hcAdd)
	}
	if len(hcDel) != 2 {
		t.Errorf("expected healthcheck del length 2, got %v", hcDel)
	if len(hcPorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", hcPorts)
	}
	if len(staleUDPServices) != 0 {
		// Services only added, so nothing stale yet

@@ -1386,28 +1360,33 @@ func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) api
	return ept
}

func makeNSN(namespace, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: namespace, Name: name}
}

func makeServicePortName(ns, name, port string) proxy.ServicePortName {
	return proxy.ServicePortName{
		NamespacedName: types.NamespacedName{
			Namespace: ns,
			Name: name,
		},
		Port: port,
		NamespacedName: makeNSN(ns, name),
		Port:           port,
	}
}

func Test_updateEndpoints(t *testing.T) {
	var nodeName = "host"

	testCases := []struct {
		newEndpoints []api.Endpoints
		oldEndpoints map[proxy.ServicePortName][]*endpointsInfo
		expectedResult map[proxy.ServicePortName][]*endpointsInfo
		expectedStale []endpointServicePair
		newEndpoints         []api.Endpoints
		oldEndpoints         map[proxy.ServicePortName][]*endpointsInfo
		expectedResult       map[proxy.ServicePortName][]*endpointsInfo
		expectedStale        []endpointServicePair
		expectedHealthchecks map[types.NamespacedName]int
	}{{
		// Case[0]: nothing
		newEndpoints: []api.Endpoints{},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStale: []endpointServicePair{},
		newEndpoints:         []api.Endpoints{},
		oldEndpoints:         map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult:       map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStale:        []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[1]: no change, unnamed port
		newEndpoints: []api.Endpoints{

@@ -1432,14 +1411,16 @@ func Test_updateEndpoints(t *testing.T) {
				{"1.1.1.1:11", false},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedStale:        []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[2]: no change, named port
		// Case[2]: no change, named port, local
		newEndpoints: []api.Endpoints{
			makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
				ept.Subsets = []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.1",
						IP:       "1.1.1.1",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p11",

@@ -1450,15 +1431,18 @@ func Test_updateEndpoints(t *testing.T) {
		},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[3]: no change, multiple subsets
		newEndpoints: []api.Endpoints{

@@ -1498,14 +1482,16 @@ func Test_updateEndpoints(t *testing.T) {
				{"1.1.1.2:12", false},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedStale:        []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[4]: no change, multiple subsets, multiple ports
		// Case[4]: no change, multiple subsets, multiple ports, local
		newEndpoints: []api.Endpoints{
			makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
				ept.Subsets = []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.1",
						IP:       "1.1.1.1",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p11",

@@ -1527,10 +1513,10 @@ func Test_updateEndpoints(t *testing.T) {
		},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
			makeServicePortName("ns1", "ep1", "p12"): {
				{"1.1.1.1:12", false},
				{"1.1.1.1:12", true},
			},
			makeServicePortName("ns1", "ep1", "p13"): {
				{"1.1.1.3:13", false},

@@ -1538,16 +1524,19 @@ func Test_updateEndpoints(t *testing.T) {
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
			makeServicePortName("ns1", "ep1", "p12"): {
				{"1.1.1.1:12", false},
				{"1.1.1.1:12", true},
			},
			makeServicePortName("ns1", "ep1", "p13"): {
				{"1.1.1.3:13", false},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[5]: no change, multiple endpoints, subsets, IPs, and ports
		newEndpoints: []api.Endpoints{

@@ -1556,7 +1545,8 @@ func Test_updateEndpoints(t *testing.T) {
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.1",
					}, {
						IP: "1.1.1.2",
						IP:       "1.1.1.2",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p11",

@@ -1569,7 +1559,8 @@ func Test_updateEndpoints(t *testing.T) {
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.3",
					}, {
						IP: "1.1.1.4",
						IP:       "1.1.1.4",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p13",

@@ -1585,7 +1576,8 @@ func Test_updateEndpoints(t *testing.T) {
					Addresses: []api.EndpointAddress{{
						IP: "2.2.2.1",
					}, {
						IP: "2.2.2.2",
						IP:       "2.2.2.2",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p21",

@@ -1600,63 +1592,68 @@ func Test_updateEndpoints(t *testing.T) {
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.2:11", false},
				{"1.1.1.2:11", true},
			},
			makeServicePortName("ns1", "ep1", "p12"): {
				{"1.1.1.1:12", false},
				{"1.1.1.2:12", false},
				{"1.1.1.2:12", true},
			},
			makeServicePortName("ns1", "ep1", "p13"): {
				{"1.1.1.3:13", false},
				{"1.1.1.4:13", false},
				{"1.1.1.4:13", true},
			},
			makeServicePortName("ns1", "ep1", "p14"): {
				{"1.1.1.3:14", false},
				{"1.1.1.4:14", false},
				{"1.1.1.4:14", true},
			},
			makeServicePortName("ns2", "ep2", "p21"): {
				{"2.2.2.1:21", false},
				{"2.2.2.2:21", false},
				{"2.2.2.2:21", true},
			},
			makeServicePortName("ns2", "ep2", "p22"): {
				{"2.2.2.1:22", false},
				{"2.2.2.2:22", false},
				{"2.2.2.2:22", true},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.2:11", false},
				{"1.1.1.2:11", true},
			},
			makeServicePortName("ns1", "ep1", "p12"): {
				{"1.1.1.1:12", false},
				{"1.1.1.2:12", false},
				{"1.1.1.2:12", true},
			},
			makeServicePortName("ns1", "ep1", "p13"): {
				{"1.1.1.3:13", false},
				{"1.1.1.4:13", false},
				{"1.1.1.4:13", true},
			},
			makeServicePortName("ns1", "ep1", "p14"): {
				{"1.1.1.3:14", false},
				{"1.1.1.4:14", false},
				{"1.1.1.4:14", true},
			},
			makeServicePortName("ns2", "ep2", "p21"): {
				{"2.2.2.1:21", false},
				{"2.2.2.2:21", false},
				{"2.2.2.2:21", true},
			},
			makeServicePortName("ns2", "ep2", "p22"): {
				{"2.2.2.1:22", false},
				{"2.2.2.2:22", false},
				{"2.2.2.2:22", true},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 2,
			makeNSN("ns2", "ep2"): 1,
		},
	}, {
		// Case[6]: add an Endpoints
		newEndpoints: []api.Endpoints{
			makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
				ept.Subsets = []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.1",
						IP:       "1.1.1.1",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Port: 11,

@@ -1667,16 +1664,19 @@ func Test_updateEndpoints(t *testing.T) {
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ /* empty */ },
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", ""): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[7]: remove an Endpoints
		newEndpoints: []api.Endpoints{ /* empty */ },
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", ""): {
				{"1.1.1.1:11", false},
				{"1.1.1.1:11", true},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},

@@ -1684,6 +1684,7 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "1.1.1.1:11",
			servicePortName: makeServicePortName("ns1", "ep1", ""),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[8]: add an IP and port
		newEndpoints: []api.Endpoints{

@@ -1692,7 +1693,8 @@ func Test_updateEndpoints(t *testing.T) {
					Addresses: []api.EndpointAddress{{
						IP: "1.1.1.1",
					}, {
						IP: "1.1.1.2",
						IP:       "1.1.1.2",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p11",

@@ -1712,14 +1714,17 @@ func Test_updateEndpoints(t *testing.T) {
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11"): {
				{"1.1.1.1:11", false},
				{"1.1.1.2:11", false},
				{"1.1.1.2:11", true},
			},
			makeServicePortName("ns1", "ep1", "p12"): {
				{"1.1.1.1:12", false},
				{"1.1.1.2:12", false},
				{"1.1.1.2:12", true},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[9]: remove an IP and port
		newEndpoints: []api.Endpoints{

@@ -1760,6 +1765,7 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "1.1.1.2:12",
			servicePortName: makeServicePortName("ns1", "ep1", "p12"),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[10]: add a subset
		newEndpoints: []api.Endpoints{

@@ -1774,7 +1780,8 @@ func Test_updateEndpoints(t *testing.T) {
					}},
				}, {
					Addresses: []api.EndpointAddress{{
						IP: "2.2.2.2",
						IP:       "2.2.2.2",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p22",

@@ -1793,10 +1800,13 @@ func Test_updateEndpoints(t *testing.T) {
				{"1.1.1.1:11", false},
			},
			makeServicePortName("ns1", "ep1", "p22"): {
				{"2.2.2.2:22", false},
				{"2.2.2.2:22", true},
			},
		},
		expectedStale: []endpointServicePair{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[11]: remove a subset
		newEndpoints: []api.Endpoints{

@@ -1829,6 +1839,7 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "2.2.2.2:22",
			servicePortName: makeServicePortName("ns1", "ep1", "p22"),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[12]: rename a port
		newEndpoints: []api.Endpoints{

@@ -1858,6 +1869,7 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "1.1.1.1:11",
			servicePortName: makeServicePortName("ns1", "ep1", "p11"),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[13]: renumber a port
		newEndpoints: []api.Endpoints{

@@ -1887,6 +1899,7 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "1.1.1.1:11",
			servicePortName: makeServicePortName("ns1", "ep1", "p11"),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[14]: complex add and remove
		newEndpoints: []api.Endpoints{

@@ -1928,7 +1941,8 @@ func Test_updateEndpoints(t *testing.T) {
			makeTestEndpoints("ns4", "ep4", func(ept *api.Endpoints) {
				ept.Subsets = []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{
						IP: "4.4.4.4",
						IP:       "4.4.4.4",
						NodeName: &nodeName,
					}},
					Ports: []api.EndpointPort{{
						Name: "p44",

@@ -1942,18 +1956,18 @@ func Test_updateEndpoints(t *testing.T) {
				{"1.1.1.1:11", false},
			},
			makeServicePortName("ns2", "ep2", "p22"): {
				{"2.2.2.2:22", false},
				{"2.2.2.22:22", false},
				{"2.2.2.2:22", true},
				{"2.2.2.22:22", true},
			},
			makeServicePortName("ns2", "ep2", "p23"): {
				{"2.2.2.3:23", false},
				{"2.2.2.3:23", true},
			},
			makeServicePortName("ns4", "ep4", "p44"): {
				{"4.4.4.4:44", false},
				{"4.4.4.5:44", false},
				{"4.4.4.4:44", true},
				{"4.4.4.5:44", true},
			},
			makeServicePortName("ns4", "ep4", "p45"): {
				{"4.4.4.6:45", false},
				{"4.4.4.6:45", true},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{

@@ -1971,7 +1985,7 @@ func Test_updateEndpoints(t *testing.T) {
				{"3.3.3.3:33", false},
			},
			makeServicePortName("ns4", "ep4", "p44"): {
				{"4.4.4.4:44", false},
				{"4.4.4.4:44", true},
			},
		},
		expectedStale: []endpointServicePair{{

@@ -1990,10 +2004,13 @@ func Test_updateEndpoints(t *testing.T) {
			endpoint: "4.4.4.6:45",
			servicePortName: makeServicePortName("ns4", "ep4", "p45"),
		}},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns4", "ep4"): 1,
		},
	}}

	for tci, tc := range testCases {
		newMap, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, "host", fakeHealthChecker{})
		newMap, hcEndpoints, stale := updateEndpoints(tc.newEndpoints, tc.oldEndpoints, nodeName)
		if len(newMap) != len(tc.expectedResult) {
			t.Errorf("[%d] expected %d results, got %d: %v", tci, len(tc.expectedResult), len(newMap), newMap)
		}

@@ -2016,6 +2033,9 @@ func Test_updateEndpoints(t *testing.T) {
				t.Errorf("[%d] expected stale[%v], but didn't find it: %v", tci, x, stale)
			}
		}
		if !reflect.DeepEqual(hcEndpoints, tc.expectedHealthchecks) {
			t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, hcEndpoints)
		}
	}
}

4 vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go generated vendored

@@ -55,6 +55,8 @@ import (
	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

const PostStartHookName = "rbac/bootstrap-roles"

type RESTStorageProvider struct {
	Authorizer authorizer.Authorizer
}

@@ -123,7 +125,7 @@ func (p RESTStorageProvider) storage(version schema.GroupVersion, apiResourceCon
}

func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) {
	return "rbac/bootstrap-roles", PostStartHook, nil
	return PostStartHookName, PostStartHook, nil
}

func PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
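
Exporting the hook name is what the second hunk consumes: callers can reference the constant instead of re-typing the literal. A minimal consumer sketch (the import alias is illustrative):

import rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"

// Anywhere the bootstrap hook must be named, use the exported constant
// rather than duplicating the "rbac/bootstrap-roles" string:
var hookName = rbacrest.PostStartHookName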

2 vendor/k8s.io/kubernetes/pkg/version/base.go generated vendored

@@ -51,7 +51,7 @@ var (
	// semantic version is a git hash, but the version itself is no
	// longer the direct output of "git describe", but a slight
	// translation to be semver compliant.
	gitVersion   string = "v1.6.1+$Format:%h$"
	gitVersion   string = "v1.6.4+$Format:%h$"
	gitCommit    string = "$Format:%H$"    // sha1 from git, output of $(git rev-parse HEAD)
	gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"

10 vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go generated vendored

@@ -151,7 +151,7 @@ func (plugin *photonPersistentDiskPlugin) ConstructVolumeSpec(volumeSpecName, mo
// Abstract interface to disk operations.
type pdManager interface {
	// Creates a volume
	CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error)
	CreateVolume(provisioner *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error)
	// Deletes a volume
	DeleteVolume(deleter *photonPersistentDiskDeleter) error
}

@@ -342,11 +342,15 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
}

func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
	pdID, sizeGB, err := p.manager.CreateVolume(p)
	pdID, sizeGB, fstype, err := p.manager.CreateVolume(p)
	if err != nil {
		return nil, err
	}

	if fstype == "" {
		fstype = "ext4"
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: p.options.PVName,

@@ -364,7 +368,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, err
			PersistentVolumeSource: v1.PersistentVolumeSource{
				PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
					PdID:   pdID,
					FSType: "ext4",
					FSType: fstype,
				},
			},
		},

4 vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go generated vendored

@@ -88,8 +88,8 @@ func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAcc
type fakePDManager struct {
}

func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, err error) {
	return "test-photon-pd-id", 10, nil
func (fake *fakePDManager) CreateVolume(c *photonPersistentDiskProvisioner) (pdID string, volumeSizeGB int, fstype string, err error) {
	return "test-photon-pd-id", 10, "ext4", nil
}

func (fake *fakePDManager) DeleteVolume(cd *photonPersistentDiskDeleter) error {

13 vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go generated vendored

@@ -80,11 +80,11 @@ func verifyDevicePath(path string) (string, error) {
}

// CreateVolume creates a PhotonController persistent disk.
func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, err error) {
func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, fstype string, err error) {
	cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
	if err != nil {
		glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err)
		return "", 0, err
		return "", 0, "", err
	}

	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]

@@ -102,20 +102,23 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
		switch strings.ToLower(parameter) {
		case "flavor":
			volumeOptions.Flavor = value
		case "fstype":
			fstype = value
			glog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype)
		default:
			glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
			return "", 0, fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
			return "", 0, "", fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
		}
	}

	pdID, err = cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err)
		return "", 0, err
		return "", 0, "", err
	}

	glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name)
	return pdID, volSizeGB, nil
	return pdID, volSizeGB, fstype, nil
}
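
Combined with the Provision change above, this lets a StorageClass parameter choose the filesystem, with ext4 as the fallback. A hypothetical parameters map as this loop would accept it (the values are illustrative, not taken from the diff):

// Hypothetical StorageClass parameters for the photon-pd provisioner:
params := map[string]string{
    "flavor": "my-disk-flavor", // hypothetical flavor name
    "fstype": "xfs",            // now recognized; empty means ext4 downstream
}
// Any other key falls through to the default branch and aborts provisioning.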

// DeleteVolume deletes a Photon Controller persistent disk.

Some files were not shown because too many files have changed in this diff.