Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
20 vendor/k8s.io/kubernetes/cluster/saltbase/README.md generated vendored Normal file
@@ -0,0 +1,20 @@
# SaltStack configuration

This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/admin/salt.md)

This SaltStack configuration currently applies to default
configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
arbitrary configuration, but those are only the in-tree OS/IaaS
combinations supported today.) As you peruse the configuration, these
are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`;
the documentation in this tree uses this same shorthand for convenience.

See more:
* [pillar](pillar/)
* [reactor](reactor/)
* [salt](salt/)


[]()
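As a rough sketch (assuming the salt-call CLI is available on a provisioned node), the `grains.cloud` shorthand described above can be inspected directly:

  # Print the provider shorthand (gce, vagrant, aws, azure-legacy) this node carries.
  salt-call --local grains.get cloud
  # Dump all grains, which the Jinja templates in this tree branch on.
  salt-call --local grains.items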
109 vendor/k8s.io/kubernetes/cluster/saltbase/install.sh generated vendored Executable file
@@ -0,0 +1,109 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will set up the salt directory on the target server. It takes one
# argument that is a tarball with the pre-compiled kubernetes server binaries.

set -o errexit
set -o nounset
set -o pipefail

SALT_ROOT=$(dirname "${BASH_SOURCE}")
readonly SALT_ROOT

readonly KUBE_DOCKER_WRAPPED_BINARIES=(
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
)

readonly SERVER_BIN_TAR=${1-}
if [[ -z "$SERVER_BIN_TAR" ]]; then
  echo "!!! No binaries specified"
  exit 1
fi

# Create a temp dir for untaring
KUBE_TEMP=$(mktemp --tmpdir=/srv -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT

# This file is meant to run on the master. It will install the salt configs
# into the appropriate place on the master. We do this by creating a new set of
# salt trees and then quickly mv'ing them where the old ones were.

readonly SALTDIRS=(salt pillar reactor)

echo "+++ Installing salt files into new trees"
rm -rf /srv/salt-new
mkdir -p /srv/salt-new

# This bash voodoo will prepend $SALT_ROOT to the start of each item in the
# $SALTDIRS array
cp -v -R --preserve=mode "${SALTDIRS[@]/#/${SALT_ROOT}/}" /srv/salt-new

echo "+++ Installing salt overlay files"
for dir in "${SALTDIRS[@]}"; do
  if [[ -d "/srv/salt-overlay/$dir" ]]; then
    cp -v -R --preserve=mode "/srv/salt-overlay/$dir" "/srv/salt-new/"
  fi
done

echo "+++ Install binaries from tar: $1"
tar -xz -C "${KUBE_TEMP}" -f "$1"
mkdir -p /srv/salt-new/salt/kube-bins
mkdir -p /srv/salt-new/salt/kube-docs
cp -v "${KUBE_TEMP}/kubernetes/server/bin/"* /srv/salt-new/salt/kube-bins/
cp -v "${KUBE_TEMP}/kubernetes/LICENSES" /srv/salt-new/salt/kube-docs/
cp -v "${KUBE_TEMP}/kubernetes/kubernetes-src.tar.gz" /srv/salt-new/salt/kube-docs/

kube_bin_dir="/srv/salt-new/salt/kube-bins";
docker_images_sls_file="/srv/salt-new/pillar/docker-images.sls";
for docker_file in "${KUBE_DOCKER_WRAPPED_BINARIES[@]}"; do
  docker_tag=$(cat ${kube_bin_dir}/${docker_file}.docker_tag);
  if [[ ! -z "${KUBE_IMAGE_TAG:-}" ]]; then
    docker_tag="${KUBE_IMAGE_TAG}"
  fi
  sed -i "s/#${docker_file}_docker_tag_value#/${docker_tag}/" "${docker_images_sls_file}";
done

cat <<EOF >>"${docker_images_sls_file}"
kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers})'
EOF

# TODO(zmerlynn): Forgive me, this is really gross. But in order to
# avoid breaking the non-Salt deployments, which already painfully
# have to templatize a couple of the add-ons anyways, manually
# templatize the addon registry for regional support. When we get
# better templating, we can fix this.
readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
  find /srv/salt-new -name \*.yaml -or -name \*.yaml.in | \
    xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
  # All the legacy .manifest files with hardcoded gcr.io are JSON.
  find /srv/salt-new -name \*.manifest -or -name \*.json | \
    xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi

echo "+++ Swapping in new configs"
for dir in "${SALTDIRS[@]}"; do
  if [[ -d "/srv/$dir" ]]; then
    rm -rf "/srv/$dir"
  fi
  mv -v "/srv/salt-new/$dir" "/srv/$dir"
done

rm -rf /srv/salt-new
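A hedged usage sketch for the install script above; the tarball path is illustrative, and only the environment variables themselves (KUBE_IMAGE_TAG, KUBE_DOCKER_REGISTRY, KUBE_ADDON_REGISTRY) come from the script:

  # Run as root on the master, pointing at a pre-built server tarball (example name).
  KUBE_DOCKER_REGISTRY="gcr.io/google_containers" \
  KUBE_ADDON_REGISTRY="gcr.io/google_containers" \
    ./install.sh /tmp/kubernetes-server-linux-amd64.tar.gz
  # Setting KUBE_IMAGE_TAG would override the per-binary .docker_tag values.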
22 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/README.md generated vendored Normal file
@@ -0,0 +1,22 @@
The
[SaltStack pillar](http://docs.saltstack.com/en/latest/topics/pillar/)
data is partially statically derived from the contents of this
directory. The bulk of the pillars are hard to perceive from browsing
this directory, though, because they are written into
[cluster-params.sls](cluster-params.sls) at cluster inception.

* [cluster-params.sls](cluster-params.sls) is generated entirely at cluster inception. See e.g. [configure-vm.sh](../../gce/configure-vm.sh#L262)
* [docker-images.sls](docker-images.sls) stores the Docker tags of the current Docker-wrapped server binaries, twiddled by the Salt install script
* [logging.sls](logging.sls) defines the cluster log level
* [mine.sls](mine.sls): defines the variables shared across machines in the Salt
  mine. It is starting to be largely deprecated in use, and is totally
  unavailable on GCE, which runs standalone.
* [privilege.sls](privilege.sls) defines whether privileged containers are allowed.
* [top.sls](top.sls) defines which pillars are active across the cluster.

## Future work

Document the current pillars across providers


[]()
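A quick, hedged way (assuming salt-call is present on the node) to see which of the pillars described above are in effect:

  # Show the compiled pillar data (cluster-params, docker-images, logging, ...).
  salt-call --local pillar.items
  # Read a single key, e.g. the cluster log level defined in logging.sls.
  salt-call --local pillar.get log_level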
4 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/cluster-params.sls generated vendored Normal file
@@ -0,0 +1,4 @@
# This file is meant to be replaced with cluster specific parameters if necessary.

# Examples:
# node_instance_prefix: <base of regex for -minion_regexp to apiserver>
5 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/docker-images.sls generated vendored Normal file
@@ -0,0 +1,5 @@
# This file is populated when kubernetes is built.
kube-apiserver_docker_tag: #kube-apiserver_docker_tag_value#
kube-controller-manager_docker_tag: #kube-controller-manager_docker_tag_value#
kube-scheduler_docker_tag: #kube-scheduler_docker_tag_value#
kube-proxy_docker_tag: #kube-proxy_docker_tag_value#
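The #<binary>_docker_tag_value# placeholders above are filled in by install.sh at install time; a minimal bash sketch of that substitution (the paths mirror install.sh, and the tag value is read from the bundled .docker_tag files):

  for b in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
    tag=$(cat "/srv/salt-new/salt/kube-bins/${b}.docker_tag")
    sed -i "s/#${b}_docker_tag_value#/${tag}/" /srv/salt-new/pillar/docker-images.sls
  done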
1 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/logging.sls generated vendored Normal file
@@ -0,0 +1 @@
log_level: "--v=2"
12 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/mine.sls generated vendored Normal file
@@ -0,0 +1,12 @@
{% if grains.cloud is defined and grains.cloud == 'gce' -%}
# On GCE, there is no Salt mine. We run standalone.
{% else %}
# Allow everyone to see cached values of who sits at what IP
{% set networkInterfaceName = "eth0" %}
{% if grains.networkInterfaceName is defined %}
  {% set networkInterfaceName = grains.networkInterfaceName %}
{% endif %}
mine_functions:
  network.ip_addrs: [{{networkInterfaceName}}]
  grains.items: []
{% endif -%}
2 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/privilege.sls generated vendored Normal file
@@ -0,0 +1,2 @@
# If true, allow privileged containers to be created by API
allow_privileged: true
9 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/systemd.sls generated vendored Normal file
@@ -0,0 +1,9 @@
{% if grains['oscodename'] in [ 'vivid', 'wily', 'jessie', 'xenial', 'yakkety' ] %}
is_systemd: True
systemd_system_path: /lib/systemd/system
{% elif grains['os_family'] == 'RedHat' %}
is_systemd: True
systemd_system_path: /usr/lib/systemd/system
{% else %}
is_systemd: False
{% endif %}
8 vendor/k8s.io/kubernetes/cluster/saltbase/pillar/top.sls generated vendored Normal file
@@ -0,0 +1,8 @@
base:
  '*':
    - mine
    - cluster-params
    - logging
    - docker-images
    - privilege
    - systemd
6 vendor/k8s.io/kubernetes/cluster/saltbase/reactor/README.md generated vendored Normal file
@@ -0,0 +1,6 @@
[SaltStack reactor](http://docs.saltstack.com/en/latest/topics/reactor/) files, largely defining reactions to new nodes.

**Ignored for GCE, which runs standalone on each machine**


[]()
10 vendor/k8s.io/kubernetes/cluster/saltbase/reactor/highstate-masters.sls generated vendored Normal file
@@ -0,0 +1,10 @@
# This runs highstate on the master node(s).
#
# Some of the cluster deployment scripts pass the list of minion addresses to
# the apiserver as a command line argument. This list needs to be updated if a
# new minion is started, so run highstate on the master(s) when this happens.
#
highstate_master:
  cmd.state.highstate:
    - tgt: 'roles:kubernetes-master'
    - expr_form: grain
10 vendor/k8s.io/kubernetes/cluster/saltbase/reactor/highstate-minions.sls generated vendored Normal file
@@ -0,0 +1,10 @@
# This runs highstate on the minion nodes.
#
# Some of the cluster deployment scripts use the list of minions on the minions
# themselves. To propagate changes throughout
# the pool, run highstate on all minions whenever a single minion starts.
#
highstate_minions:
  cmd.state.highstate:
    - tgt: 'roles:kubernetes-pool'
    - expr_form: grain
4 vendor/k8s.io/kubernetes/cluster/saltbase/reactor/highstate-new.sls generated vendored Normal file
@@ -0,0 +1,4 @@
# This runs highstate only on the NEW node, regardless of type.
highstate_new:
  cmd.state.highstate:
    - tgt: {{ data['id'] }}
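These reactor states run in response to node start events on the Salt master; a hedged sketch (assuming a running master) for watching the event bus to see the events that would trigger them:

  # Tail the Salt event bus on the master; new-node start events show up here.
  salt-run state.event pretty=True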
32 vendor/k8s.io/kubernetes/cluster/saltbase/salt/README.md generated vendored Normal file
@@ -0,0 +1,32 @@
This directory forms the base of the main SaltStack configuration. The
place to start with any SaltStack configuration is
[top.sls](top.sls). However, unless you are particularly keen on
reading Jinja templates, the following tables break down what
configurations run on what providers. (NB: The [_states](_states/)
directory is a special directory included by Salt for `ensure` blocks,
and is only used for the [docker](docker/) config.)

Key: M = Config applies to master, n = config applies to nodes

Config | GCE | Vagrant | AWS | Azure
----------------------------------------------------|-------|---------|-----|------
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
[docker](docker/) | M n | M n | M n | M n
[etcd](etcd/) | M | M | M | M
[generate-cert](generate-cert/) | M | M | M | M
[kube-addons](kube-addons/) | M | M | M | M
[kube-apiserver](kube-apiserver/) | M | M | M | M
[kube-controller-manager](kube-controller-manager/) | M | M | M | M
[kube-proxy](kube-proxy/) | n | n | n | n
[kube-scheduler](kube-scheduler/) | M | M | M | M
[kubelet](kubelet/) | M n | M n | M n | M n
[logrotate](logrotate/) | M n | n | M n | M n
[supervisord](supervisor/) | M n | M n | M n | M n
[nginx](nginx/) | | | | M
[openvpn-client](openvpn-client/) | | | | n
[openvpn](openvpn/) | | | | M
[base](base.sls) | M n | M n | M n | M n
[kube-client-tools](kube-client-tools.sls) | M | M | M | M


[]()
60 vendor/k8s.io/kubernetes/cluster/saltbase/salt/base.sls generated vendored Normal file
@@ -0,0 +1,60 @@
pkg-core:
  pkg.installed:
    - names:
      - curl
      - ebtables
{% if grains['os_family'] == 'RedHat' %}
      - python
      - git
      - socat
{% else %}
      - apt-transport-https
      - python-apt
      - nfs-common
      - socat
{% endif %}
# Ubuntu installs netcat-openbsd by default, but on GCE/Debian netcat-traditional is installed.
# They behave slightly differently.
# For sanity, we try to make sure we have the same netcat on all OSes (#15166)
{% if grains['os'] == 'Ubuntu' %}
      - netcat-traditional
{% endif %}
# Make sure git is installed for mounting git volumes
{% if grains['os'] == 'Ubuntu' %}
      - git
{% endif %}

# Fix ARP cache issues on AWS by setting net.ipv4.neigh.default.gc_thresh1=0
# See issue #23395
{% if grains.get('cloud') == 'aws' %}
# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
# (we also have to give it a different id from the same fix elsewhere)
99-salt-conf-with-a-different-id:
  file.touch:
    - name: /etc/sysctl.d/99-salt.conf

net.ipv4.neigh.default.gc_thresh1:
  sysctl.present:
    - value: 0
{% endif %}

/usr/local/share/doc/kubernetes:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

/usr/local/share/doc/kubernetes/LICENSES:
  file.managed:
    - source: salt://kube-docs/LICENSES
    - user: root
    - group: root
    - mode: 644

/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz:
  file.managed:
    - source: salt://kube-docs/kubernetes-src.tar.gz
    - user: root
    - group: root
    - mode: 644
16 vendor/k8s.io/kubernetes/cluster/saltbase/salt/calico/10-calico.conf generated vendored Normal file
@@ -0,0 +1,16 @@
{
  "name": "k8s-pod-network",
  "type": "calico",
  "etcd_authority": "10.0.0.17:6666",
  "log_level": "info",
  "ipam": {
    "type": "host-local",
    "subnet": "usePodCidr"
  },
  "policy": {
    "type": "k8s"
  },
  "kubernetes": {
    "kubeconfig": "/var/lib/kubelet/kubeconfig"
  }
}
40 vendor/k8s.io/kubernetes/cluster/saltbase/salt/calico/calico-node.manifest generated vendored Normal file
@@ -0,0 +1,40 @@
apiVersion: v1
kind: Pod
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    k8s-app: calico-node
spec:
  hostNetwork: true
  containers:
    - name: calico-node
      image: quay.io/calico/node:v0.20.0
      env:
        - name: ETCD_ENDPOINTS
          value: "http://10.0.0.17:6666"
        - name: CALICO_NETWORKING
          value: "false"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /var/log/calico
          name: var-log-calico
          readOnly: false
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
  volumes:
    - name: lib-modules
      hostPath:
        path: /lib/modules
    - name: var-run-calico
      hostPath:
        path: /var/run/calico
    - name: var-log-calico
      hostPath:
        path: /var/log/calico
19 vendor/k8s.io/kubernetes/cluster/saltbase/salt/calico/master.sls generated vendored Normal file
@@ -0,0 +1,19 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}

calico-policy-controller:
  file.managed:
    - name: /etc/kubernetes/manifests/calico-policy-controller.manifest
    - source: salt://calico/calico-policy-controller.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - context:
        cpurequest: '20m'
    - require:
      - service: docker
      - service: kubelet

{% endif -%}
41 vendor/k8s.io/kubernetes/cluster/saltbase/salt/calico/node.sls generated vendored Normal file
@@ -0,0 +1,41 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}

calico-node:
  file.managed:
    - name: /etc/kubernetes/manifests/calico-node.manifest
    - source: salt://calico/calico-node.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - kmod: ip6_tables
      - kmod: xt_set
      - service: docker
      - service: kubelet

calico-cni:
  file.managed:
    - name: /opt/cni/bin/calico
    - source: https://github.com/projectcalico/calico-cni/releases/download/v1.3.1/calico
    - source_hash: sha256=ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77
    - makedirs: True
    - mode: 744

calico-cni-config:
  file.managed:
    - name: /etc/cni/net.d/10-calico.conf
    - source: salt://calico/10-calico.conf
    - makedirs: True
    - mode: 644
    - template: jinja

ip6_tables:
  kmod.present

xt_set:
  kmod.present

{% endif -%}
87 vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest generated vendored Normal file
@@ -0,0 +1,87 @@
{% if pillar.get('enable_cluster_autoscaler', '').lower() == 'true' %}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
{% set cloud_config_volume = "" -%}
{% if grains.cloud == 'gce' and grains.cloud_config is defined -%}
  {% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
  {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
  {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
{% endif -%}
{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}

{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "cluster-autoscaler",
    "namespace": "kube-system",
    "labels": {
      "tier": "cluster-management",
      "component": "cluster-autoscaler"
    }
  },
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "cluster-autoscaler",
        "image": "gcr.io/google_containers/cluster-autoscaler:v0.4.0",
        "command": [
          "/bin/sh",
          "-c",
          "./cluster-autoscaler --kubernetes=http://127.0.0.1:8080?inClusterConfig=f --v=4 {{params}} 1>>/var/log/cluster-autoscaler.log 2>&1"
        ],
        # TODO: Make resource requirements depend on the size of the cluster
        "resources": {
          "requests": {
            "cpu": "20m",
            "memory": "300Mi"
          }
        },
        "volumeMounts": [
          {{cloud_config_mount}}
          {
            "name": "ssl-certs",
            "readOnly": true,
            "mountPath": "/etc/ssl/certs"
          },
          {
            "name": "usrsharecacerts",
            "readOnly": true,
            "mountPath": "/usr/share/ca-certificates"
          },
          {
            "name": "logfile",
            "mountPath": "/var/log/cluster-autoscaler.log",
            "readOnly": false
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "imagePullPolicy": "IfNotPresent"
      }
    ],
    "volumes": [
      {{cloud_config_volume}}
      {
        "name": "ssl-certs",
        "hostPath": {
          "path": "/etc/ssl/certs"
        }
      },
      {
        "name": "usrsharecacerts",
        "hostPath": {
          "path": "/usr/share/ca-certificates"
        }
      },
      {
        "name": "logfile",
        "hostPath": {
          "path": "/var/log/cluster-autoscaler.log"
        }
      }
    ],
    "restartPolicy": "Always"
  }
}
{% endif %}
25 vendor/k8s.io/kubernetes/cluster/saltbase/salt/cluster-autoscaler/init.sls generated vendored Normal file
@@ -0,0 +1,25 @@
# Copy autoscaler manifest to manifests folder for master.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restart docker or kubelet and kubelet start master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for detail explanation on this very issue.

/etc/kubernetes/manifests/cluster-autoscaler.manifest:
  file.managed:
    - source: salt://cluster-autoscaler/cluster-autoscaler.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

/var/log/cluster-autoscaler.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644
41 vendor/k8s.io/kubernetes/cluster/saltbase/salt/cni/init.sls generated vendored Normal file
@@ -0,0 +1,41 @@
/opt/cni:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

/etc/cni/net.d:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

# These are all available CNI network plugins.
cni-tar:
  archive:
    - extracted
    - user: root
    - name: /opt/cni
    - makedirs: True
    - source: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
    - tar_options: v
    - source_hash: md5=dd11b04dcb0b9aca00b5287ba18dcfaa
    - archive_format: tar
    - if_missing: /opt/cni/bin

{% if grains['cloud'] is defined and grains.cloud in [ 'vagrant' ] %}
# Install local CNI network plugins in a Vagrant environment
cmd-local-cni-plugins:
  cmd.run:
    - name: |
        cp -v /vagrant/cluster/network-plugins/cni/bin/* /opt/cni/bin/.
        chmod +x /opt/cni/bin/*
cmd-local-cni-config:
  cmd.run:
    - name: |
        cp -v /vagrant/cluster/network-plugins/cni/config/* /etc/cni/net.d/.
        chown root:root /etc/cni/net.d/*
        chmod 744 /etc/cni/net.d/*
{% endif -%}
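The cni-tar state above pins the plugin tarball by md5; a hedged bash sketch of verifying the same artifact by hand, reusing the URL and checksum from the state:

  curl -sLO https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
  echo "dd11b04dcb0b9aca00b5287ba18dcfaa  cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz" | md5sum -c -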
4 vendor/k8s.io/kubernetes/cluster/saltbase/salt/debian-auto-upgrades/20auto-upgrades generated vendored Normal file
@@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";

APT::Periodic::AutocleanInterval "7";
13 vendor/k8s.io/kubernetes/cluster/saltbase/salt/debian-auto-upgrades/init.sls generated vendored Normal file
@@ -0,0 +1,13 @@
{% if grains['os_family'] == 'Debian' %}
unattended-upgrades:
  pkg.installed

'/etc/apt/apt.conf.d/20auto-upgrades':
  file.managed:
    - source: salt://debian-auto-upgrades/20auto-upgrades
    - user: root
    - group: root
    - mode: 644
    - require:
      - pkg: unattended-upgrades
{% endif %}
8 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/default generated vendored Normal file
@@ -0,0 +1,8 @@
{% set docker_opts = "" -%}
{% if grains.docker_opts is defined and grains.docker_opts -%}
  {% set docker_opts = grains.docker_opts -%}
{% endif -%}

DOCKER_OPTS='{{docker_opts}}'
OPTIONS='{{docker_opts}}'
DOCKER_CERT_PATH=/etc/docker
18 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker-defaults generated vendored Normal file
@@ -0,0 +1,18 @@
{% set grains_opts = grains.get('docker_opts', '') -%}
{% set e2e_opts = '' -%}
{% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' -%}
  {% set e2e_opts = '-s devicemapper' -%}
{% endif -%}
{% set bridge_opts = "--bridge=cbr0" %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
  {% set bridge_opts = "" %}
{% endif -%}
{% if pillar.get('network_provider', '').lower() == 'cni' %}
  {% set bridge_opts = "" %}
{% endif -%}
{% set log_level = "--log-level=warn" -%}
{% if pillar['docker_test_log_level'] is defined -%}
  {% set log_level = pillar['docker_test_log_level'] -%}
{% endif -%}
DOCKER_OPTS="{{grains_opts}} {{e2e_opts}} {{bridge_opts}} --iptables=false --ip-masq=false {{log_level}}"
DOCKER_NOFILE=1000000
44 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker-healthcheck generated vendored Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is intended to be run periodically, to check the health
# of docker. If it detects a failure, it will restart docker using systemctl.

if timeout 10 docker version > /dev/null; then
  exit 0
fi

echo "docker failed"
echo "Giving docker 30 seconds grace before restarting"
sleep 30

if timeout 10 docker version > /dev/null; then
  echo "docker recovered"
  exit 0
fi

echo "docker still down; triggering docker restart"
systemctl restart docker

echo "Waiting 60 seconds to give docker time to start"
sleep 60

if timeout 10 docker version > /dev/null; then
  echo "docker recovered"
  exit 0
fi

echo "docker still failing"
9 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker-healthcheck.service generated vendored Normal file
@@ -0,0 +1,9 @@
[Unit]
Description=Run docker-healthcheck once

[Service]
Type=oneshot
ExecStart=/opt/kubernetes/helpers/docker-healthcheck

[Install]
WantedBy=multi-user.target
9 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker-healthcheck.timer generated vendored Normal file
@@ -0,0 +1,9 @@
[Unit]
Description=Trigger docker-healthcheck periodically

[Timer]
OnUnitInactiveSec=10s
Unit=docker-healthcheck.service

[Install]
WantedBy=multi-user.target
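The docker salt state normally loads these units through /opt/kubernetes/helpers/services; a hedged sketch of the equivalent manual steps on a systemd host:

  # Reload unit files, then start the timer that re-runs docker-healthcheck after 10s of inactivity.
  systemctl daemon-reload
  systemctl enable --now docker-healthcheck.timer
  # Trigger a one-off health check immediately.
  systemctl start docker-healthcheck.service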
22 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker-prestart generated vendored Executable file
@@ -0,0 +1,22 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is intended to be run before we start Docker.

# cleanup docker network checkpoint to avoid running into known issue
# of docker (https://github.com/docker/docker/issues/18283)
rm -rf /var/lib/docker/network

1 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker.list generated vendored Normal file
@@ -0,0 +1 @@
deb https://apt.dockerproject.org/repo debian-{{ salt['grains.get']('oscodename') }} main
21 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/docker.service generated vendored Normal file
@@ -0,0 +1,21 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
EnvironmentFile={{ environment_file }}
ExecStart=/usr/bin/docker daemon -H fd:// "$DOCKER_OPTS"
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
ExecStartPre=/opt/kubernetes/helpers/docker-prestart

[Install]
WantedBy=multi-user.target
535 vendor/k8s.io/kubernetes/cluster/saltbase/salt/docker/init.sls generated vendored Normal file
@@ -0,0 +1,535 @@
|
|||
{% if pillar.get('is_systemd') %}
|
||||
{% set environment_file = '/etc/sysconfig/docker' %}
|
||||
{% else %}
|
||||
{% set environment_file = '/etc/default/docker' %}
|
||||
{% endif %}
|
||||
|
||||
bridge-utils:
|
||||
pkg.installed
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
|
||||
{{ environment_file }}:
|
||||
file.managed:
|
||||
- source: salt://docker/default
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
|
||||
{% if grains.cloud is defined and grains.cloud == 'openstack' %}
|
||||
|
||||
cbr0:
|
||||
# workaround https://github.com/saltstack/salt/issues/20570
|
||||
kmod.present:
|
||||
- name: bridge
|
||||
|
||||
network.managed:
|
||||
- enabled: True
|
||||
- type: bridge
|
||||
- proto: none
|
||||
- ports: none
|
||||
- bridge: cbr0
|
||||
- delay: 0
|
||||
- bypassfirewall: True
|
||||
- require_in:
|
||||
- service: docker
|
||||
- require:
|
||||
- kmod: cbr0
|
||||
{% endif %}
|
||||
|
||||
{% if (grains.os == 'Fedora' and grains.osrelease_info[0] >= 22) or (grains.os == 'CentOS' and grains.osrelease_info[0] >= 7) %}
|
||||
|
||||
docker:
|
||||
pkg:
|
||||
- installed
|
||||
service.running:
|
||||
- enable: True
|
||||
- require:
|
||||
- pkg: docker
|
||||
- watch:
|
||||
- file: {{ environment_file }}
|
||||
- pkg: docker
|
||||
|
||||
{% else %}
|
||||
|
||||
docker-io:
|
||||
pkg:
|
||||
- installed
|
||||
|
||||
docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
- require:
|
||||
- pkg: docker-io
|
||||
- watch:
|
||||
- file: {{ environment_file }}
|
||||
- pkg: docker-io
|
||||
|
||||
{% endif %}
|
||||
{% elif grains.cloud is defined and grains.cloud == 'azure-legacy' %}
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/docker.service:
|
||||
file.managed:
|
||||
- source: salt://docker/docker.service
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- defaults:
|
||||
environment_file: {{ environment_file }}
|
||||
|
||||
# The docker service.running block below doesn't work reliably
|
||||
# Instead we run our script which e.g. does a systemd daemon-reload
|
||||
# But we keep the service block below, so it can be used by dependencies
|
||||
# TODO: Fix this
|
||||
fix-service-docker:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce docker
|
||||
- watch:
|
||||
- file: {{ pillar.get('systemd_system_path') }}/docker.service
|
||||
- file: {{ environment_file }}
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file }}:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-defaults
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- require:
|
||||
- pkg: docker-engine
|
||||
|
||||
apt-key:
|
||||
pkgrepo.managed:
|
||||
- humanname: Dotdeb
|
||||
- name: deb https://apt.dockerproject.org/repo ubuntu-trusty main
|
||||
- dist: ubuntu-trusty
|
||||
- file: /etc/apt/sources.list.d/docker.list
|
||||
- keyid: 58118E89F3A912897C070ADBF76221572C52609D
|
||||
- keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
|
||||
lxc-docker:
|
||||
pkg:
|
||||
- purged
|
||||
|
||||
docker-io:
|
||||
pkg:
|
||||
- purged
|
||||
|
||||
cbr0:
|
||||
network.managed:
|
||||
- enabled: True
|
||||
- type: bridge
|
||||
{% if grains['roles'][0] == 'kubernetes-pool' %}
|
||||
- proto: none
|
||||
{% else %}
|
||||
- proto: dhcp
|
||||
{% endif %}
|
||||
- ports: none
|
||||
- bridge: cbr0
|
||||
{% if grains['roles'][0] == 'kubernetes-pool' %}
|
||||
- ipaddr: {{ grains['cbr-cidr'] }}
|
||||
{% endif %}
|
||||
- delay: 0
|
||||
- bypassfirewall: True
|
||||
- require_in:
|
||||
- service: docker
|
||||
|
||||
docker-engine:
|
||||
pkg:
|
||||
- installed
|
||||
- require:
|
||||
- pkgrepo: 'apt-key'
|
||||
|
||||
docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
- require:
|
||||
- file: {{ environment_file }}
|
||||
- watch:
|
||||
- file: {{ environment_file }}
|
||||
|
||||
{% elif grains.cloud is defined and grains.cloud in ['photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
|
||||
/opt/kubernetes/helpers/docker-prestart:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-prestart
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/docker.service:
|
||||
file.managed:
|
||||
- source: salt://docker/docker.service
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- defaults:
|
||||
environment_file: {{ environment_file }}
|
||||
- require:
|
||||
- file: /opt/kubernetes/helpers/docker-prestart
|
||||
- pkg: docker-engine
|
||||
|
||||
# The docker service.running block below doesn't work reliably
|
||||
# Instead we run our script which e.g. does a systemd daemon-reload
|
||||
# But we keep the service block below, so it can be used by dependencies
|
||||
# TODO: Fix this
|
||||
fix-service-docker:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce docker
|
||||
- watch:
|
||||
- file: {{ pillar.get('systemd_system_path') }}/docker.service
|
||||
- file: {{ environment_file }}
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file }}:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-defaults
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- require:
|
||||
- pkg: docker-engine
|
||||
|
||||
apt-key:
|
||||
cmd.run:
|
||||
- name: 'apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D'
|
||||
- unless: 'apt-key finger | grep "5811 8E89"'
|
||||
|
||||
apt-update:
|
||||
cmd.run:
|
||||
- name: '/usr/bin/apt-get update -y'
|
||||
- require:
|
||||
- cmd : 'apt-key'
|
||||
|
||||
lxc-docker:
|
||||
pkg:
|
||||
- purged
|
||||
|
||||
docker-io:
|
||||
pkg:
|
||||
- purged
|
||||
|
||||
cbr0:
|
||||
network.managed:
|
||||
- enabled: True
|
||||
- type: bridge
|
||||
- proto: dhcp
|
||||
- ports: none
|
||||
- bridge: cbr0
|
||||
- delay: 0
|
||||
- bypassfirewall: True
|
||||
- require_in:
|
||||
- service: docker
|
||||
|
||||
/etc/apt/sources.list.d/docker.list:
|
||||
file.managed:
|
||||
- source: salt://docker/docker.list
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- require:
|
||||
- cmd: 'apt-update'
|
||||
|
||||
# restricting docker version to 1.9. with older version of docker we are facing
|
||||
# issue https://github.com/docker/docker/issues/18793.
|
||||
# newer version of docker 1.10.0 is not well tested yet.
|
||||
# full comments: https://github.com/kubernetes/kubernetes/pull/20851
|
||||
docker-engine:
|
||||
pkg:
|
||||
- installed
|
||||
- version: 1.9.*
|
||||
- require:
|
||||
- file: /etc/apt/sources.list.d/docker.list
|
||||
docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
- require:
|
||||
- file: {{ environment_file }}
|
||||
- watch:
|
||||
- file: {{ environment_file }}
|
||||
|
||||
{% else %}
|
||||
|
||||
{% if grains.cloud is defined
|
||||
and grains.cloud == 'gce' %}
|
||||
# The default GCE images have ip_forwarding explicitly set to 0.
|
||||
# Here we take care of commenting that out.
|
||||
/etc/sysctl.d/11-gce-network-security.conf:
|
||||
file.replace:
|
||||
- pattern: '^net.ipv4.ip_forward=0'
|
||||
- repl: '# net.ipv4.ip_forward=0'
|
||||
{% endif %}
|
||||
|
||||
# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
|
||||
/etc/sysctl.d/99-salt.conf:
|
||||
file.touch
|
||||
|
||||
# TODO: This should really be based on network strategy instead of os_family
|
||||
net.ipv4.ip_forward:
|
||||
sysctl.present:
|
||||
- value: 1
|
||||
|
||||
{% if pillar.get('softlockup_panic', '').lower() == 'true' %}
|
||||
# TODO(dchen1107) Remove this once kernel.softlockup_panic is built into the CVM image.
|
||||
/etc/sysctl.conf:
|
||||
file.append:
|
||||
- text:
|
||||
- "kernel.softlockup_panic = 1"
|
||||
- "kernel.softlockup_all_cpu_backtrace = 1"
|
||||
|
||||
'sysctl-reload':
|
||||
cmd.run:
|
||||
- name: 'sysctl --system'
|
||||
- unless: 'sysctl -a | grep "kernel.softlockup_panic = 1"'
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file }}:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-defaults
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
|
||||
# Docker is on the ContainerVM image by default. The following
|
||||
# variables are provided for other cloud providers, and for testing and dire circumstances, to allow
|
||||
# overriding the Docker version that's in a ContainerVM image.
|
||||
#
|
||||
# To change:
|
||||
#
|
||||
# 1. Find new deb name at:
|
||||
# http://apt.dockerproject.org/repo/pool/main/d/docker-engine
|
||||
# 2. Download based on that:
|
||||
# curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/<deb>
|
||||
# 3. Upload to GCS:
|
||||
# gsutil cp <deb> gs://kubernetes-release/docker/<deb>
|
||||
# 4. Make it world readable:
|
||||
# gsutil acl ch -R -g all:R gs://kubernetes-release/docker/<deb>
|
||||
# 5. Get a hash of the deb:
|
||||
# shasum <deb>
|
||||
# 6. Update override_deb, override_deb_sha1, override_docker_ver with new
|
||||
# deb name, new hash and new version
|
||||
|
||||
{% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %}
|
||||
|
||||
{% set override_deb_url='' %}
|
||||
|
||||
{% if grains.get('cloud', '') == 'gce'
|
||||
and grains.get('os_family', '') == 'Debian'
|
||||
and grains.get('oscodename', '') == 'wheezy' -%}
|
||||
{% set docker_pkg_name='' %}
|
||||
{% set override_deb='' %}
|
||||
{% set override_deb_sha1='' %}
|
||||
{% set override_docker_ver='' %}
|
||||
|
||||
{% elif grains.get('cloud', '') == 'gce'
|
||||
and grains.get('os_family', '') == 'Debian'
|
||||
and grains.get('oscodename', '') == 'jessie' -%}
|
||||
{% set docker_pkg_name='' %}
|
||||
{% set override_deb='' %}
|
||||
{% set override_deb_sha1='' %}
|
||||
{% set override_docker_ver='' %}
|
||||
|
||||
{% elif grains.get('cloud', '') == 'aws'
|
||||
and grains.get('os_family', '') == 'Debian'
|
||||
and grains.get('oscodename', '') == 'jessie' -%}
|
||||
# TODO: Get from google storage?
|
||||
{% set docker_pkg_name='docker-engine' %}
|
||||
{% set override_docker_ver='1.11.2-0~jessie' %}
|
||||
{% set override_deb='docker-engine_1.11.2-0~jessie_amd64.deb' %}
|
||||
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~jessie_amd64.deb' %}
|
||||
{% set override_deb_sha1='c312f1f6fa0b34df4589bb812e4f7af8e28fd51d' %}
|
||||
|
||||
# Ubuntu presents as os_family=Debian, osfullname=Ubuntu
|
||||
{% elif grains.get('cloud', '') == 'aws'
|
||||
and grains.get('os_family', '') == 'Debian'
|
||||
and grains.get('oscodename', '') == 'trusty' -%}
|
||||
# TODO: Get from google storage?
|
||||
{% set docker_pkg_name='docker-engine' %}
|
||||
{% set override_docker_ver='1.11.2-0~trusty' %}
|
||||
{% set override_deb='docker-engine_1.11.2-0~trusty_amd64.deb' %}
|
||||
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~trusty_amd64.deb' %}
|
||||
{% set override_deb_sha1='022dee31e68c6d572eaac750915786e4a6729d2a' %}
|
||||
|
||||
{% elif grains.get('cloud', '') == 'aws'
|
||||
and grains.get('os_family', '') == 'Debian'
|
||||
and grains.get('oscodename', '') == 'wily' -%}
|
||||
# TODO: Get from google storage?
|
||||
{% set docker_pkg_name='docker-engine' %}
|
||||
{% set override_docker_ver='1.11.2-0~wily' %}
|
||||
{% set override_deb='docker-engine_1.11.2-0~wily_amd64.deb' %}
|
||||
{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~wily_amd64.deb' %}
|
||||
{% set override_deb_sha1='3e02f51fe18aa777eeb1676c3d9a75e5ea6d96c9' %}
|
||||
|
||||
{% else %}
|
||||
{% set docker_pkg_name='lxc-docker-1.7.1' %}
|
||||
{% set override_docker_ver='1.7.1' %}
|
||||
{% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %}
|
||||
{% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %}
|
||||
{% endif %}
|
||||
|
||||
{% if override_deb_url == '' %}
|
||||
{% set override_deb_url=storage_base + override_deb %}
|
||||
{% endif %}
|
||||
|
||||
{% if override_docker_ver != '' %}
|
||||
purge-old-docker-package:
|
||||
pkg.removed:
|
||||
- pkgs:
|
||||
- lxc-docker-1.6.2
|
||||
|
||||
/var/cache/docker-install/{{ override_deb }}:
|
||||
file.managed:
|
||||
- source: {{ override_deb_url }}
|
||||
- source_hash: sha1={{ override_deb_sha1 }}
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
|
||||
# Drop the license file into /usr/share so that everything is crystal clear.
|
||||
/usr/share/doc/docker/apache.txt:
|
||||
file.managed:
|
||||
- source: {{ storage_base }}apache2.txt
|
||||
- source_hash: sha1=2b8b815229aa8a61e483fb4ba0588b8b6c491890
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
|
||||
libltdl7:
|
||||
pkg.installed
|
||||
|
||||
docker-upgrade:
|
||||
cmd.run:
|
||||
- name: /opt/kubernetes/helpers/pkg install-no-start {{ docker_pkg_name }} {{ override_docker_ver }} /var/cache/docker-install/{{ override_deb }}
|
||||
- require:
|
||||
- file: /var/cache/docker-install/{{ override_deb }}
|
||||
- pkg: libltdl7
|
||||
|
||||
{% endif %} # end override_docker_ver != ''
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
|
||||
/opt/kubernetes/helpers/docker-prestart:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-prestart
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
# Default docker systemd unit file doesn't use an EnvironmentFile; replace it with one that does.
|
||||
{{ pillar.get('systemd_system_path') }}/docker.service:
|
||||
file.managed:
|
||||
- source: salt://docker/docker.service
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- defaults:
|
||||
environment_file: {{ environment_file }}
|
||||
- require:
|
||||
- file: /opt/kubernetes/helpers/docker-prestart
|
||||
|
||||
# The docker service.running block below doesn't work reliably
|
||||
# Instead we run our script which e.g. does a systemd daemon-reload
|
||||
# But we keep the service block below, so it can be used by dependencies
|
||||
# TODO: Fix this
|
||||
fix-service-docker:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services enable docker
|
||||
- watch:
|
||||
- file: {{ pillar.get('systemd_system_path') }}/docker.service
|
||||
- file: {{ environment_file }}
|
||||
{% if override_docker_ver != '' %}
|
||||
- require:
|
||||
- cmd: docker-upgrade
|
||||
{% endif %}
|
||||
|
||||
/opt/kubernetes/helpers/docker-healthcheck:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-healthcheck
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/docker-healthcheck.service:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-healthcheck.service
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer:
|
||||
file.managed:
|
||||
- source: salt://docker/docker-healthcheck.timer
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
# Tell systemd to load the timer
|
||||
fix-systemd-docker-healthcheck-timer:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce docker-healthcheck.timer
|
||||
- watch:
|
||||
- file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer
|
||||
|
||||
# Trigger a first run of docker-healthcheck; needed because the timer fires 10s after the previous run.
|
||||
fix-systemd-docker-healthcheck-service:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce docker-healthcheck.service
|
||||
- watch:
|
||||
- file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.service
|
||||
- require:
|
||||
- cmd: fix-service-docker
|
||||
|
||||
{% endif %}
|
||||
|
||||
docker:
|
||||
# Starting Docker is racy on aws for some reason. To be honest, since Monit
|
||||
# is managing Docker restart we should probably just delete this whole thing
|
||||
# but the kubernetes components use salt 'require' to set up a dag, and that
|
||||
# complicated and scary to unwind.
|
||||
# On AWS, we use a trick now... We don't start the docker service through Salt.
|
||||
# Kubelet or our health checker will start it. But we use service.enabled,
|
||||
# so we still have a `service: docker` node for our DAG.
|
||||
{% if grains.cloud is defined and grains.cloud == 'aws' %}
|
||||
service.enabled:
|
||||
{% else %}
|
||||
service.running:
|
||||
- enable: True
|
||||
{% endif %}
|
||||
# If we put a watch on this, salt will try to start the service.
|
||||
# We put the watch on the fixer instead
|
||||
{% if not pillar.get('is_systemd') %}
|
||||
- watch:
|
||||
- file: {{ environment_file }}
|
||||
{% if override_docker_ver != '' %}
|
||||
- cmd: docker-upgrade
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
- require:
|
||||
- file: {{ environment_file }}
|
||||
{% if override_docker_ver != '' %}
|
||||
- cmd: docker-upgrade
|
||||
{% endif %}
|
||||
{% if pillar.get('is_systemd') %}
|
||||
- cmd: fix-service-docker
|
||||
{% endif %}
|
||||
{% endif %} # end grains.os_family != 'RedHat'
|
61 vendor/k8s.io/kubernetes/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest generated vendored Normal file
@@ -0,0 +1,61 @@
|
|||
# e2e-image-puller seeds nodes in an e2e cluster with test images.
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: e2e-image-puller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
name: e2e-image-puller
|
||||
spec:
|
||||
containers:
|
||||
- name: image-puller
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
limits:
|
||||
cpu: 100m
|
||||
image: gcr.io/google_containers/busybox:1.24
|
||||
# TODO: Replace this with a go script that pulls in parallel?
|
||||
# Currently it takes ~5m to pull all e2e images, so this is OK, and
|
||||
# fewer moving parts is always better.
|
||||
# TODO: Replace the hardcoded image list with an autogen list; the list is
|
||||
# currently hard-coded for static verification. It was generated via:
|
||||
# grep -Iiroh "gcr.io/google_.*" "${KUBE_ROOT}/test/e2e" | \
|
||||
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
|
||||
# We always want the subshell to exit 0 so this pod doesn't end up
|
||||
# blocking tests in an Error state.
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "for i in gcr.io/google_containers/busybox gcr.io/google_containers/busybox:1.24 gcr.io/google_containers/dnsutils:e2e gcr.io/google_containers/eptest:0.1 gcr.io/google_containers/fakegitserver:0.1 gcr.io/google_containers/hostexec:1.2 gcr.io/google_containers/iperf:e2e gcr.io/google_containers/jessie-dnsutils:e2e gcr.io/google_containers/liveness:e2e gcr.io/google_containers/mounttest:0.7 gcr.io/google_containers/mounttest-user:0.3 gcr.io/google_containers/netexec:1.4 gcr.io/google_containers/netexec:1.7 gcr.io/google_containers/nettest:1.7 gcr.io/google_containers/nettest:1.8 gcr.io/google_containers/nginx-slim:0.7 gcr.io/google_containers/nginx-slim:0.8 gcr.io/google_containers/n-way-http:1.0 gcr.io/google_containers/pause:2.0 gcr.io/google_containers/pause-amd64:3.0 gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab gcr.io/google_containers/portforwardtester:1.0 gcr.io/google_containers/redis:e2e gcr.io/google_containers/resource_consumer:beta4 gcr.io/google_containers/resource_consumer/controller:beta4 gcr.io/google_containers/serve_hostname:v1.4 gcr.io/google_containers/test-webserver:e2e gcr.io/google_containers/ubuntu:14.04 gcr.io/google_containers/update-demo:kitten gcr.io/google_containers/update-demo:nautilus gcr.io/google_containers/volume-ceph:0.1 gcr.io/google_containers/volume-gluster:0.2 gcr.io/google_containers/volume-iscsi:0.1 gcr.io/google_containers/volume-nfs:0.6 gcr.io/google_containers/volume-rbd:0.1 gcr.io/google_samples/gb-redisslave:v1 gcr.io/google_containers/redis:v1; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0;"
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/docker.sock
|
||||
name: socket
|
||||
- mountPath: /usr/bin/docker
|
||||
name: docker
|
||||
# Add a container that runs a health-check
|
||||
- name: nethealth-check
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
limits:
|
||||
cpu: 100m
|
||||
image: gcr.io/google_containers/kube-nethealth-amd64:1.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "/usr/bin/nethealth || true"
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /var/run/docker.sock
|
||||
name: socket
|
||||
- hostPath:
|
||||
path: /usr/bin/docker
|
||||
name: docker
|
||||
# This pod is really fire-and-forget.
|
||||
restartPolicy: OnFailure
|
||||
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
|
||||
hostNetwork: true
|
||||
|
12 vendor/k8s.io/kubernetes/cluster/saltbase/salt/e2e-image-puller/init.sls generated vendored Normal file
@@ -0,0 +1,12 @@
/etc/kubernetes/manifests/e2e-image-puller.manifest:
  file.managed:
    - source: salt://e2e-image-puller/e2e-image-puller.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet
28 vendor/k8s.io/kubernetes/cluster/saltbase/salt/e2e/init.sls generated vendored Normal file
|
@ -0,0 +1,28 @@
|
|||
e2e:
  # Install various packages required by e2e tests to all hosts.
  pkg.installed:
    - refresh: true
    - pkgs:
      - targetcli
      - ceph
{% if grains['os_family'] == 'RedHat' %}
      - glusterfs-fuse
      - rbd-fuse
      - iscsi-initiator-utils
      - nfs-utils
{% else %}
      - glusterfs-client
      - open-iscsi
      - iscsitarget-dkms
      - nfs-common
{% endif %}

{% if grains['os_family'] == 'Debian' %}
# On Debian, re-start open-iscsi to generate unique
# /etc/iscsi/initiatorname.iscsi
open-iscsi:
  cmd.run:
    - name: 'service open-iscsi restart'
{% endif %}
113
vendor/k8s.io/kubernetes/cluster/saltbase/salt/etcd/etcd.manifest
generated
vendored
Normal file
@@ -0,0 +1,113 @@
{% set etcd_protocol = 'http' -%}
|
||||
{% set etcd_creds = '' -%}
|
||||
{% if pillar.get('etcd_over_ssl', '').lower() == 'true' -%}
|
||||
{% set etcd_protocol = 'https' -%}
|
||||
{% set etcd_creds = '--peer-trusted-ca-file /srv/kubernetes/etcd-ca.crt --peer-cert-file /srv/kubernetes/etcd-peer.crt --peer-key-file /srv/kubernetes/etcd-peer.key -peer-client-cert-auth' -%}
|
||||
{% endif -%}
|
||||
{% set cluster_state = 'new' -%}
|
||||
{% set hostname = pillar.get('hostname', '') -%}
|
||||
{% set etcd_cluster_array = (pillar.get('initial_etcd_cluster') or hostname).split(',') -%}
|
||||
{% set etcd_cluster = '' -%}
|
||||
{# We use vars dictionary to pass variables set inside the for loop, because jinja defines new variables inside the for loop that hide variables from the outside. #}
|
||||
{% set vars = {'etcd_cluster': '', 'cluster_state': cluster_state} -%}
|
||||
{% for host in etcd_cluster_array -%}
|
||||
{% if etcd_cluster != '' -%}
|
||||
{% set cluster_state = 'existing' -%}
|
||||
{% set etcd_cluster = etcd_cluster ~ ',' -%}
|
||||
{% endif -%}
|
||||
{% set etcd_cluster = etcd_cluster ~ 'etcd-' ~ host ~ '=' ~ etcd_protocol ~'://' ~ host ~ ':' ~ server_port -%}
|
||||
{% do vars.update({'etcd_cluster': etcd_cluster, 'cluster_state': cluster_state}) -%}
|
||||
{% endfor -%}
|
||||
{% set etcd_cluster = vars.etcd_cluster -%}
|
||||
{% set cluster_state = vars.cluster_state -%}
|
||||
{% set storage_backend = pillar.get('storage_backend', 'etcd3') -%}
|
||||
{% set quota_bytes = '' -%}
|
||||
{% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%}
|
||||
{% set quota_bytes = '--quota-backend-bytes=4294967296' -%}
|
||||
{% endif -%}
|
||||
{% set srv_kube_path = "/srv/kubernetes" -%}
|
||||
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name":"etcd-server{{ suffix }}",
|
||||
"namespace": "kube-system"
|
||||
},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "etcd-container",
|
||||
"image": "gcr.io/google_containers/etcd:{{ pillar.get('etcd_docker_tag', '3.0.14-alpha.1') }}",
|
||||
"resources": {
|
||||
"requests": {
|
||||
"cpu": {{ cpulimit }}
|
||||
}
|
||||
},
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
|
||||
],
|
||||
"env": [
|
||||
{ "name": "TARGET_STORAGE",
|
||||
"value": "{{ storage_backend }}"
|
||||
},
|
||||
{ "name": "TARGET_VERSION",
|
||||
"value": "{{ pillar.get('etcd_version', '3.0.14') }}"
|
||||
},
|
||||
{ "name": "DATA_DIRECTORY",
|
||||
"value": "/var/etcd/data{{ suffix }}"
|
||||
}
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"host": "127.0.0.1",
|
||||
"port": {{ port }},
|
||||
"path": "/health"
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 15
|
||||
},
|
||||
"ports": [
|
||||
{ "name": "serverport",
|
||||
"containerPort": {{ server_port }},
|
||||
"hostPort": {{ server_port }}
|
||||
},
|
||||
{ "name": "clientport",
|
||||
"containerPort": {{ port }},
|
||||
"hostPort": {{ port }}
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "varetcd",
|
||||
"mountPath": "/var/etcd",
|
||||
"readOnly": false
|
||||
},
|
||||
{ "name": "varlogetcd",
|
||||
"mountPath": "/var/log/etcd{{ suffix }}.log",
|
||||
"readOnly": false
|
||||
},
|
||||
{ "name": "etc",
|
||||
"mountPath": "{{ srv_kube_path }}",
|
||||
"readOnly": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "varetcd",
|
||||
"hostPath": {
|
||||
"path": "/mnt/master-pd/var/etcd"}
|
||||
},
|
||||
{ "name": "varlogetcd",
|
||||
"hostPath": {
|
||||
"path": "/var/log/etcd{{ suffix }}.log"}
|
||||
},
|
||||
{ "name": "etc",
|
||||
"hostPath": {
|
||||
"path": "{{ srv_kube_path }}"}
|
||||
}
|
||||
]
|
||||
}}
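The vars dictionary workaround above exists because Jinja scopes variables set inside a for loop; the loop's only job is to accumulate one "etcd-<host>=<protocol>://<host>:<server_port>" entry per member. For a hypothetical two-member cluster (hostnames are placeholders, 2380 is the server_port passed in from init.sls) the accumulated value would render roughly as:

# Illustrative rendering of the etcd_cluster variable after the loop; not taken from a real cluster.
--initial-cluster etcd-master-1=http://master-1:2380,etcd-master-2=http://master-2:2380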
83
vendor/k8s.io/kubernetes/cluster/saltbase/salt/etcd/init.sls
generated
vendored
Normal file
@@ -0,0 +1,83 @@
# Early configurations of Kubernetes ran etcd on the host and as part of a migration step, we began to delete the host etcd
|
||||
# It's possible, though, that the host has configured a separate etcd for other services such as Flannel
|
||||
# In that case, we do not want Salt to remove or stop the host service
|
||||
# Note: it's imperative that the host-installed etcd not conflict with the Kubernetes-managed etcd
|
||||
{% if grains['keep_host_etcd'] is not defined %}
|
||||
|
||||
delete_etc_etcd_dir:
|
||||
file.absent:
|
||||
- name: /etc/etcd
|
||||
|
||||
delete_etcd_conf:
|
||||
file.absent:
|
||||
- name: /etc/etcd/etcd.conf
|
||||
|
||||
delete_etcd_default:
|
||||
file.absent:
|
||||
- name: /etc/default/etcd
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
delete_etcd_service_file:
|
||||
file.absent:
|
||||
- name: {{ pillar.get('systemd_system_path') }}/etcd.service
|
||||
{% endif %}
|
||||
|
||||
delete_etcd_initd:
|
||||
file.absent:
|
||||
- name: /etc/init.d/etcd
|
||||
|
||||
#stop legacy etcd_service
|
||||
stop_etcd-service:
|
||||
service.dead:
|
||||
- name: etcd
|
||||
- enable: None
|
||||
|
||||
{% endif %}
|
||||
|
||||
touch /var/log/etcd.log:
|
||||
cmd.run:
|
||||
- creates: /var/log/etcd.log
|
||||
|
||||
touch /var/log/etcd-events.log:
|
||||
cmd.run:
|
||||
- creates: /var/log/etcd-events.log
|
||||
|
||||
/var/etcd:
|
||||
file.directory:
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 700
|
||||
- recurse:
|
||||
- user
|
||||
- group
|
||||
- mode
|
||||
|
||||
/etc/kubernetes/manifests/etcd.manifest:
|
||||
file.managed:
|
||||
- source: salt://etcd/etcd.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- context:
|
||||
suffix: ""
|
||||
port: 2379
|
||||
server_port: 2380
|
||||
cpulimit: '"200m"'
|
||||
|
||||
/etc/kubernetes/manifests/etcd-events.manifest:
|
||||
file.managed:
|
||||
- source: salt://etcd/etcd.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- context:
|
||||
suffix: "-events"
|
||||
port: 4002
|
||||
server_port: 2381
|
||||
cpulimit: '"100m"'
3
vendor/k8s.io/kubernetes/cluster/saltbase/salt/fluentd-gcp/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
assignees:
- Crassirostris
- piosz
68
vendor/k8s.io/kubernetes/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml
generated
vendored
Normal file
@@ -0,0 +1,68 @@
# please keep this file synchronized with cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: fluentd-cloud-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: fluentd-logging
|
||||
spec:
|
||||
dnsPolicy: Default
|
||||
containers:
|
||||
- name: fluentd-cloud-logging
|
||||
image: gcr.io/google_containers/fluentd-gcp:1.32
|
||||
# If fluentd consumes its own logs, the following situation may happen:
|
||||
# fluentd fails to send a chunk to the server => writes it to the log =>
|
||||
# tries to send this message to the server => fails to send a chunk and so on.
|
||||
# Writing to a file, which is not exported to the back-end prevents it.
|
||||
# It also allows increasing the fluentd verbosity by default.
|
||||
command:
|
||||
- '/bin/sh'
|
||||
- '-c'
|
||||
- '/run.sh $FLUENTD_ARGS 2>&1 >>/var/log/fluentd.log'
|
||||
resources:
|
||||
limits:
|
||||
memory: 200Mi
|
||||
requests:
|
||||
# Any change here should be accompanied by a proportional change in CPU
|
||||
# requests of other per-node add-ons (e.g. kube-proxy).
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
- name: libsystemddir
|
||||
mountPath: /host/lib
|
||||
# Liveness probe is aimed to help in situations where fluentd
|
||||
# silently hangs for no apparent reasons until manual restart.
|
||||
# The idea of this probe is that if fluentd is not queueing or
|
||||
# flushing chunks for 5 minutes, something is not right. If
|
||||
# you want to change the fluentd configuration, reducing amount of
|
||||
# logs fluentd collects, consider changing the threshold or turning
|
||||
# liveness probe off completely.
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 600
|
||||
periodSeconds: 60
|
||||
exec:
|
||||
command:
|
||||
- '/bin/sh'
|
||||
- '-c'
|
||||
- >
|
||||
LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-600};
|
||||
LAST_MODIFIED_DATE=`stat /var/log/fluentd-buffers | grep Modify | sed -r "s/Modify: (.*)/\1/"`;
|
||||
LAST_MODIFIED_TIMESTAMP=`date -d "$LAST_MODIFIED_DATE" +%s`;
|
||||
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $LIVENESS_THRESHOLD_SECONDS` ]; then exit 1; fi;
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: varlibdockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
- name: libsystemddir
|
||||
hostPath:
|
||||
path: /usr/lib64
10
vendor/k8s.io/kubernetes/cluster/saltbase/salt/fluentd-gcp/init.sls
generated
vendored
Normal file
@@ -0,0 +1,10 @@
{% if grains.kubelet_api_servers is not defined -%}
/etc/kubernetes/manifests/fluentd-gcp.yaml:
  file.managed:
    - source: salt://fluentd-gcp/fluentd-gcp.yaml
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
{% endif %}
48
vendor/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/init.sls
generated
vendored
Normal file
@@ -0,0 +1,48 @@
{% set master_extra_sans=grains.get('master_extra_sans', '') %}
{% if grains.cloud is defined %}
  {% if grains.cloud == 'gce' %}
    {% set cert_ip='_use_gce_external_ip_' %}
  {% endif %}
  {% if grains.cloud == 'aws' %}
    {% set cert_ip='_use_aws_external_ip_' %}
  {% endif %}
  {% if grains.cloud == 'azure-legacy' %}
    {% set cert_ip='_use_azure_dns_name_' %}
  {% endif %}
  {% if grains.cloud == 'photon-controller' %}
    {% set cert_ip=grains.ip_interfaces.eth0[0] %}
  {% endif %}
{% endif %}

# If there is a pillar defined, override any defaults.
{% if pillar['cert_ip'] is defined %}
  {% set cert_ip=pillar['cert_ip'] %}
{% endif %}

{% set certgen="make-cert.sh" %}
{% if cert_ip is defined %}
  {% set certgen="make-ca-cert.sh" %}
{% endif %}

openssl:
  pkg.installed: []

kube-cert:
  group.present:
    - system: True

kubernetes-cert:
  cmd.script:
    - unless: test -f /srv/kubernetes/server.cert
    - source: salt://generate-cert/{{certgen}}
{% if cert_ip is defined %}
    - args: {{cert_ip}} {{master_extra_sans}}
    - require:
      - pkg: curl
{% endif %}
    - cwd: /
    - user: root
    - group: root
    - shell: /bin/bash
    - require:
      - pkg: openssl
108
vendor/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
generated
vendored
Executable file
@@ -0,0 +1,108 @@
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DEBUG="${DEBUG:-false}"
|
||||
|
||||
if [ "${DEBUG}" == "true" ]; then
|
||||
set -x
|
||||
fi
|
||||
|
||||
cert_ip=$1
|
||||
extra_sans=${2:-}
|
||||
cert_dir=${CERT_DIR:-/srv/kubernetes}
|
||||
cert_group=${CERT_GROUP:-kube-cert}
|
||||
|
||||
mkdir -p "$cert_dir"
|
||||
|
||||
use_cn=false
|
||||
|
||||
# TODO: Add support for discovery on other providers?
|
||||
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
|
||||
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
|
||||
fi
|
||||
|
||||
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
|
||||
# If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then
|
||||
# curl will happily spit out the contents of AWS's 404 page and an exit code of zero.
|
||||
#
|
||||
# The string containing the 404 page trips up one of easyrsa's calls to openssl later; whichever
|
||||
# one creates the CA certificate, because the 404 page is > 64 characters.
|
||||
if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then
|
||||
:
|
||||
else
|
||||
cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4)
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
|
||||
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
|
||||
use_cn=true
|
||||
fi
|
||||
|
||||
sans="IP:${cert_ip}"
|
||||
if [[ -n "${extra_sans}" ]]; then
|
||||
sans="${sans},${extra_sans}"
|
||||
fi
|
||||
|
||||
tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX)
|
||||
trap 'rm -rf "${tmpdir}"' EXIT
|
||||
cd "${tmpdir}"
|
||||
|
||||
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
|
||||
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
|
||||
# but is originally taken from:
|
||||
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
||||
#
|
||||
# To update, do the following:
|
||||
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
||||
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
||||
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
||||
#
|
||||
# Due to GCS caching of public objects, it may take time for this to be widely
|
||||
# distributed.
|
||||
#
|
||||
# Use ~/kube/easy-rsa.tar.gz if it exists, so that it can be
|
||||
# pre-pushed in cases where an outgoing connection is not allowed.
|
||||
if [ -f ~/kube/easy-rsa.tar.gz ]; then
|
||||
ln -s ~/kube/easy-rsa.tar.gz .
|
||||
else
|
||||
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
|
||||
fi
|
||||
tar xzf easy-rsa.tar.gz > /dev/null 2>&1
|
||||
|
||||
cd easy-rsa-master/easyrsa3
|
||||
./easyrsa init-pki > /dev/null 2>&1
|
||||
./easyrsa --batch "--req-cn=$cert_ip@`date +%s`" build-ca nopass > /dev/null 2>&1
|
||||
if [ $use_cn = "true" ]; then
|
||||
./easyrsa build-server-full $cert_ip nopass > /dev/null 2>&1
|
||||
cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1
|
||||
cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1
|
||||
else
|
||||
./easyrsa --subject-alt-name="${sans}" build-server-full kubernetes-master nopass > /dev/null 2>&1
|
||||
cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1
|
||||
cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1
|
||||
fi
|
||||
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1
|
||||
cp -p pki/ca.crt "${cert_dir}/ca.crt"
|
||||
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
|
||||
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
|
||||
# Make server certs accessible to apiserver.
|
||||
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
|
||||
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
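Salt drives this script through the kubernetes-cert state earlier in this tree, passing the cert IP and any extra SANs as arguments; a hand-run equivalent would look roughly like the sketch below (the IP and SAN values are placeholders, not taken from a real cluster):

# Illustrative manual invocation of make-ca-cert.sh:
sudo CERT_DIR=/srv/kubernetes CERT_GROUP=kube-cert \
  ./make-ca-cert.sh 203.0.113.10 "IP:10.0.0.1,DNS:kubernetes,DNS:kubernetes.default"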
26
vendor/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
generated
vendored
Executable file
@@ -0,0 +1,26 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}

mkdir -p "$cert_dir"

openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
  -subj "/CN=kubernetes.invalid/O=Kubernetes" \
  -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"
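A minimal sketch of running this self-signed variant directly (the paths are the script's own defaults, which the kube-apiserver manifest later references via --tls-cert-file and --tls-private-key-file):

# Generate a self-signed server cert/key pair into /srv/kubernetes:
sudo CERT_DIR=/srv/kubernetes CERT_GROUP=kube-cert ./make-cert.sh
ls -l /srv/kubernetes/server.cert /srv/kubernetes/server.key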
14
vendor/k8s.io/kubernetes/cluster/saltbase/salt/helpers/init.sls
generated
vendored
Normal file
@@ -0,0 +1,14 @@
{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %}
/usr/share/google:
  file.directory:
    - user: root
    - group: root
    - dir_mode: 755

/usr/share/google/safe_format_and_mount:
  file.managed:
    - source: salt://helpers/safe_format_and_mount
    - user: root
    - group: root
    - mode: 755
{% endif %}
147
vendor/k8s.io/kubernetes/cluster/saltbase/salt/helpers/safe_format_and_mount
generated
vendored
Normal file
@@ -0,0 +1,147 @@
#! /bin/bash
|
||||
# Copyright 2013 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Mount a disk, formatting it if necessary. If the disk looks like it may
|
||||
# have been formatted before, we will not format it.
|
||||
#
|
||||
# This script uses blkid and file to search for magic "formatted" bytes
|
||||
# at the beginning of the disk. Furthermore, it attempts to use fsck to
|
||||
# repair the filesystem before formatting it.
|
||||
|
||||
FSCK=fsck.ext4
|
||||
MOUNT_OPTIONS="discard,defaults"
|
||||
MKFS="mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -F"
|
||||
if [ -e /etc/redhat-release ]; then
|
||||
if grep -q '6\..' /etc/redhat-release; then
|
||||
# lazy_journal_init is not recognized in redhat 6
|
||||
MKFS="mkfs.ext4 -E lazy_itable_init=0 -F"
|
||||
elif grep -q '7\..' /etc/redhat-release; then
|
||||
FSCK=fsck.xfs
|
||||
MKFS=mkfs.xfs
|
||||
fi
|
||||
fi
|
||||
|
||||
LOGTAG=safe_format_and_mount
|
||||
LOGFACILITY=user
|
||||
|
||||
function log() {
|
||||
local readonly severity=$1; shift;
|
||||
logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@"
|
||||
}
|
||||
|
||||
function log_command() {
|
||||
local readonly log_file=$(mktemp)
|
||||
local readonly retcode
|
||||
log info "Running: $*"
|
||||
$* > ${log_file} 2>&1
|
||||
retcode=$?
|
||||
# only return the last 1000 lines of the logfile, just in case it's HUGE.
|
||||
tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s
|
||||
rm -f ${log_file}
|
||||
return ${retcode}
|
||||
}
|
||||
|
||||
function help() {
|
||||
cat >&2 <<EOF
|
||||
$0 [-f fsck_cmd] [-m mkfs_cmd] [-o mount_opts] <device> <mountpoint>
|
||||
EOF
|
||||
exit 0
|
||||
}
|
||||
|
||||
while getopts ":hf:o:m:" opt; do
|
||||
case $opt in
|
||||
h) help;;
|
||||
f) FSCK=$OPTARG;;
|
||||
o) MOUNT_OPTIONS=$OPTARG;;
|
||||
m) MKFS=$OPTARG;;
|
||||
-) break;;
|
||||
\?) log error "Invalid option: -${OPTARG}"; exit 1;;
|
||||
:) log "Option -${OPTARG} requires an argument."; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $(($OPTIND - 1))
|
||||
readonly DISK=$1
|
||||
readonly MOUNTPOINT=$2
|
||||
|
||||
[[ -z ${DISK} ]] && help
|
||||
[[ -z ${MOUNTPOINT} ]] && help
|
||||
|
||||
function disk_looks_unformatted() {
|
||||
blkid ${DISK}
|
||||
if [[ $? == 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
local readonly file_type=$(file --special-files ${DISK})
|
||||
case ${file_type} in
|
||||
*filesystem*)
|
||||
return 0;;
|
||||
esac
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
function format_disk() {
|
||||
log_command ${MKFS} ${DISK}
|
||||
}
|
||||
|
||||
function try_repair_disk() {
|
||||
log_command ${FSCK} -a ${DISK}
|
||||
local readonly fsck_return=$?
|
||||
if [[ ${fsck_return} -ge 8 ]]; then
|
||||
log error "Fsck could not correct errors on ${DISK}"
|
||||
return 1
|
||||
fi
|
||||
if [[ ${fsck_return} -gt 0 ]]; then
|
||||
log warning "Fsck corrected errors on ${DISK}"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
function try_mount() {
|
||||
local mount_retcode
|
||||
try_repair_disk
|
||||
|
||||
log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
|
||||
mount_retcode=$?
|
||||
if [[ ${mount_retcode} == 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Check to see if it looks like a filesystem before formatting it.
|
||||
disk_looks_unformatted ${DISK}
|
||||
if [[ $? == 0 ]]; then
|
||||
log error "Disk ${DISK} looks formatted but won't mount. Giving up."
|
||||
return ${mount_retcode}
|
||||
fi
|
||||
|
||||
# The disk looks like it's not been formatted before.
|
||||
format_disk
|
||||
if [[ $? != 0 ]]; then
|
||||
log error "Format of ${DISK} failed."
|
||||
fi
|
||||
|
||||
log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
|
||||
mount_retcode=$?
|
||||
if [[ ${mount_retcode} == 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
log error "Tried everything we could, but could not mount ${DISK}."
|
||||
return ${mount_retcode}
|
||||
}
|
||||
|
||||
try_mount
|
||||
exit $?
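The help() text above documents the calling convention; a typical invocation would be a sketch like the following (device and mount point are placeholders):

# Format the disk only if it does not already look formatted, then mount it:
./safe_format_and_mount -m "mkfs.ext4 -F" /dev/xvdb /mnt/master-pd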
189
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-addons/init.sls
generated
vendored
Normal file
@@ -0,0 +1,189 @@
addon-dir-delete:
|
||||
file.absent:
|
||||
- name: /etc/kubernetes/addons
|
||||
|
||||
addon-dir-create:
|
||||
file.directory:
|
||||
- name: /etc/kubernetes/addons
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 0755
|
||||
- require:
|
||||
- file: addon-dir-delete
|
||||
|
||||
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %}
|
||||
/etc/kubernetes/addons/cluster-monitoring/influxdb:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/cluster-monitoring/influxdb
|
||||
- include_pat: E@(^.+\.yaml$|^.+\.json$)
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_l7_loadbalancing', '').lower() == 'glbc' %}
|
||||
/etc/kubernetes/addons/cluster-loadbalancing/glbc:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/cluster-loadbalancing/glbc
|
||||
- include_pat: E@(^.+\.yaml$|^.+\.json$)
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'google' %}
|
||||
/etc/kubernetes/addons/cluster-monitoring/google:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/cluster-monitoring/google
|
||||
- include_pat: E@(^.+\.yaml$|^.+\.json$)
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'standalone' %}
|
||||
/etc/kubernetes/addons/cluster-monitoring/standalone:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/cluster-monitoring/standalone
|
||||
- include_pat: E@(^.+\.yaml$|^.+\.json$)
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'googleinfluxdb' %}
|
||||
/etc/kubernetes/addons/cluster-monitoring/googleinfluxdb:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/cluster-monitoring
|
||||
- include_pat: E@(^.+\.yaml$|^.+\.json$)
|
||||
- exclude_pat: E@(^.+heapster-controller\.yaml$|^.+heapster-controller\.json$)
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/dns/kubedns-svc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/dns/kubedns-svc.yaml.in
|
||||
- template: jinja
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- makedirs: True
|
||||
|
||||
/etc/kubernetes/addons/dns/kubedns-controller.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/dns/kubedns-controller.yaml.in
|
||||
- template: jinja
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- makedirs: True
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_dns_horizontal_autoscaler', '').lower() == 'true'
|
||||
and pillar.get('enable_cluster_dns', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/registry/registry-svc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/registry/registry-svc.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
|
||||
/etc/kubernetes/addons/registry/registry-rc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/registry/registry-rc.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
|
||||
/etc/kubernetes/addons/registry/registry-pv.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/registry/registry-pv.yaml.in
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
|
||||
/etc/kubernetes/addons/registry/registry-pvc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/registry/registry-pvc.yaml.in
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_node_logging', '').lower() == 'true'
|
||||
and 'logging_destination' in pillar
|
||||
and pillar.get('enable_cluster_logging', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/fluentd-{{ pillar.get('logging_destination') }}:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/fluentd-{{ pillar.get('logging_destination') }}
|
||||
- include_pat: E@^.+\.yaml$
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_cluster_ui', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/dashboard:
|
||||
file.recurse:
|
||||
- source: salt://kube-addons/dashboard
|
||||
- include_pat: E@^.+\.yaml$
|
||||
- user: root
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- file_mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('enable_node_problem_detector', '').lower() == 'true' %}
|
||||
/etc/kubernetes/addons/node-problem-detector/node-problem-detector.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/node-problem-detector/node-problem-detector.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
- makedirs: True
|
||||
{% endif %}
|
||||
|
||||
/etc/kubernetes/manifests/kube-addon-manager.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/kube-addon-manager.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] in ['aws', 'gce', 'openstack'] %}
|
||||
/etc/kubernetes/addons/storage-class/default.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-addons/storage-class/{{ grains['cloud'] }}/default.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: True
|
||||
{% endif %}
37
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
generated
vendored
Normal file
@@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-addon-manager
  namespace: kube-system
  labels:
    component: kube-addon-manager
spec:
  hostNetwork: true
  containers:
  - name: kube-addon-manager
    # When updating version also bump it in:
    # - cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
    # - cluster/images/hyperkube/static-pods/addon-manager-multinode.json
    image: gcr.io/google-containers/kube-addon-manager:v6.2
    command:
    - /bin/bash
    - -c
    - /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    volumeMounts:
    - mountPath: /etc/kubernetes/
      name: addons
      readOnly: true
    - mountPath: /var/log
      name: varlog
      readOnly: false
  volumes:
  - hostPath:
      path: /etc/kubernetes/
    name: addons
  - hostPath:
      path: /var/log
    name: varlog
10
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-admission-controls/init.sls
generated
vendored
Normal file
@@ -0,0 +1,10 @@
{% if 'LimitRanger' in pillar.get('admission_control', '') %}
/etc/kubernetes/admission-controls/limit-range:
  file.recurse:
    - source: salt://kube-admission-controls/limit-range
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}
10
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml
generated
vendored
Normal file
@@ -0,0 +1,10 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
  name: "limits"
  namespace: default
spec:
  limits:
    - type: "Container"
      defaultRequest:
        cpu: "100m"
7
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-apiserver/abac-authz-policy.jsonl
generated
vendored
Normal file
@@ -0,0 +1,7 @@
{% set kube_user = grains.kube_user -%}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"{{kube_user}}", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubecfg", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"client", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group":"system:serviceaccounts", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*"}}
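These policy lines only take effect because the kube-apiserver manifest further down enables ABAC; the relevant rendered apiserver flags boil down to a sketch like this (paths as set in kube-apiserver/init.sls):

# ABAC-related flags the apiserver manifest assembles on supported clouds:
--authorization-mode=ABAC \
--authorization-policy-file=/srv/kubernetes/abac-authz-policy.jsonl \
--token-auth-file=/srv/kubernetes/known_tokens.csv \
--basic-auth-file=/srv/kubernetes/basic_auth.csv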
65
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-apiserver/init.sls
generated
vendored
Normal file
@@ -0,0 +1,65 @@
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
|
||||
# TODO: generate and distribute tokens on other cloud providers.
|
||||
/srv/kubernetes/known_tokens.csv:
|
||||
file.managed:
|
||||
- source: salt://kube-apiserver/known_tokens.csv
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 600
|
||||
# - watch_in:
|
||||
# - service: kube-apiserver
|
||||
|
||||
/srv/kubernetes/basic_auth.csv:
|
||||
file.managed:
|
||||
- source: salt://kube-apiserver/basic_auth.csv
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 600
|
||||
|
||||
/srv/kubernetes/abac-authz-policy.jsonl:
|
||||
file.managed:
|
||||
- source: salt://kube-apiserver/abac-authz-policy.jsonl
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 600
|
||||
{% endif %}
|
||||
|
||||
/var/log/kube-apiserver.log:
|
||||
file.managed:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
# Copy kube-apiserver manifest to manifests folder for kubelet.
|
||||
# Current containervm image by default has both docker and kubelet
|
||||
# running. But during cluster creation stage, docker and kubelet
|
||||
# could be overwritten completely, or restarted due to flag changes.
|
||||
# The ordering of salt states for service docker, kubelet and
|
||||
# master-addon below is very important to avoid the race between
|
||||
# salt restart docker or kubelet and kubelet start master components.
|
||||
# Without the ordering of salt states, when gce instance boot up,
|
||||
# configure-vm.sh will run and download the release. At the end of
|
||||
# boot, run-salt will install kube-apiserver.manifest files to
|
||||
# kubelet config directory before the installation of proper version
|
||||
# kubelet. Please see
|
||||
# http://issue.k8s.io/10122#issuecomment-114566063
|
||||
# for detail explanation on this very issue.
|
||||
/etc/kubernetes/manifests/kube-apiserver.manifest:
|
||||
file.managed:
|
||||
- source: salt://kube-apiserver/kube-apiserver.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- require:
|
||||
- service: docker
|
||||
- service: kubelet
|
||||
|
||||
#stop legacy kube-apiserver service
|
||||
stop_kube-apiserver:
|
||||
service.dead:
|
||||
- name: kube-apiserver
|
||||
- enable: None
299
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
generated
vendored
Normal file
@@ -0,0 +1,299 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
|
||||
{% if grains['os_family'] == 'RedHat' -%}
|
||||
{% set daemon_args = "" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cloud_provider = "" -%}
|
||||
{% set cloud_config = "" -%}
|
||||
{% set cloud_config_mount = "" -%}
|
||||
{% set cloud_config_volume = "" -%}
|
||||
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
|
||||
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
|
||||
|
||||
{% set srv_kube_path = "/srv/kubernetes" -%}
|
||||
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
|
||||
|
||||
{% if grains.cloud is defined -%}
|
||||
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
|
||||
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in ['openstack'] -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set advertise_address = "" -%}
|
||||
{% if grains.advertise_address is defined -%}
|
||||
{% set advertise_address = "--advertise-address=" + grains.advertise_address -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set proxy_ssh_options = "" -%}
|
||||
{% if grains.proxy_ssh_user is defined -%}
|
||||
{% set proxy_ssh_options = "--ssh-user=" + grains.proxy_ssh_user + " --ssh-keyfile=/srv/sshproxy/.sshkeyfile" -%}
|
||||
{# Append 40 characters onto command to work around #9822. #}
|
||||
{# If mount list changes, this may also need to change. #}
|
||||
{% set proxy_ssh_options = proxy_ssh_options + " " -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set address = "--address=127.0.0.1" -%}
|
||||
|
||||
{% set bind_address = "" -%}
|
||||
{% if grains.publicAddressOverride is defined -%}
|
||||
{% set bind_address = "--bind-address=" + grains.publicAddressOverride -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set storage_backend = "" -%}
|
||||
{% if pillar['storage_backend'] is defined -%}
|
||||
{% set storage_backend = "--storage-backend=" + pillar['storage_backend'] -%}
|
||||
{% endif -%}
|
||||
{% set etcd_servers = "--etcd-servers=http://127.0.0.1:2379" -%}
|
||||
{% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
|
||||
|
||||
{% set max_requests_inflight = "" -%}
|
||||
{% set target_ram_mb = "" -%}
|
||||
{% if pillar['num_nodes'] is defined -%}
|
||||
# If the cluster is large, increase max-requests-inflight limit in apiserver.
|
||||
{% if pillar['num_nodes']|int >= 1000 -%}
|
||||
{% set max_requests_inflight = "--max-requests-inflight=1500 --max-mutating-requests-inflight=500" -%}
|
||||
{% endif -%}
|
||||
# Set amount of memory available for apiserver based on number of nodes.
|
||||
# TODO: Once we start setting proper requests and limits for apiserver
|
||||
# we should reuse the same logic here instead of current heuristic.
|
||||
{% set tmp_ram_mb = pillar['num_nodes']|int * 60 %}
|
||||
{% set target_ram_mb = "--target-ram-mb=" + tmp_ram_mb|string -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set service_cluster_ip_range = "" -%}
|
||||
{% if pillar['service_cluster_ip_range'] is defined -%}
|
||||
{% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cert_file = "--tls-cert-file=/srv/kubernetes/server.cert" -%}
|
||||
{% set key_file = "--tls-private-key-file=/srv/kubernetes/server.key" -%}
|
||||
{% set kubelet_cert_file = "--kubelet-client-certificate=/srv/kubernetes/kubeapiserver.cert" -%}
|
||||
{% set kubelet_key_file = "--kubelet-client-key=/srv/kubernetes/kubeapiserver.key" -%}
|
||||
{% set client_ca_file = "" -%}
|
||||
|
||||
{% set secure_port = "6443" -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
|
||||
{% set secure_port = "443" -%}
|
||||
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set min_request_timeout = "" -%}
|
||||
{% if grains.minRequestTimeout is defined -%}
|
||||
{% set min_request_timeout = "--min-request-timeout=" + grains.minRequestTimeout -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set token_auth_file = " --token-auth-file=/dev/null" -%}
|
||||
{% set basic_auth_file = "" -%}
|
||||
{% set authz_mode = "" -%}
|
||||
{% set abac_policy_file = "" -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
|
||||
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
|
||||
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
|
||||
{% set authz_mode = " --authorization-mode=ABAC" -%}
|
||||
{% set abac_policy_file = " --authorization-policy-file=/srv/kubernetes/abac-authz-policy.jsonl" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set webhook_authentication_config = "" -%}
|
||||
{% set webhook_authn_config_mount = "" -%}
|
||||
{% set webhook_authn_config_volume = "" -%}
|
||||
{% if grains.webhook_authentication_config is defined -%}
|
||||
{% set webhook_authentication_config = " --authentication-token-webhook-config-file=" + grains.webhook_authentication_config -%}
|
||||
{% set webhook_authn_config_mount = "{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"" + grains.webhook_authentication_config + "\", \"readOnly\": false}," -%}
|
||||
{% set webhook_authn_config_volume = "{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authentication_config + "\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set webhook_authorization_config = "" -%}
|
||||
{% set webhook_config_mount = "" -%}
|
||||
{% set webhook_config_volume = "" -%}
|
||||
{% if grains.webhook_authorization_config is defined -%}
|
||||
{% set webhook_authorization_config = " --authorization-webhook-config-file=" + grains.webhook_authorization_config -%}
|
||||
{% set webhook_config_mount = "{\"name\": \"webhookconfigmount\",\"mountPath\": \"" + grains.webhook_authorization_config + "\", \"readOnly\": false}," -%}
|
||||
{% set webhook_config_volume = "{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"" + grains.webhook_authorization_config + "\"}}," -%}
|
||||
{% set authz_mode = authz_mode + ",Webhook" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set image_review_config = "" -%}
|
||||
{% set admission_controller_config_mount = "" -%}
|
||||
{% set admission_controller_config_volume = "" -%}
|
||||
{% set image_policy_webhook_config_mount = "" -%}
|
||||
{% set image_policy_webhook_config_volume = "" -%}
|
||||
{% if grains.image_review_config is defined -%}
|
||||
{% set image_review_config = " --admission-control-config-file=" + grains.image_review_config -%}
|
||||
{% set admission_controller_config_mount = "{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"" + grains.image_review_config + "\", \"readOnly\": false}," -%}
|
||||
{% set admission_controller_config_volume = "{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"" + grains.image_review_config + "\"}}," -%}
|
||||
{% set image_policy_webhook_config_mount = "{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false}," -%}
|
||||
{% set image_policy_webhook_config_volume = "{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set admission_control = "" -%}
|
||||
{% if pillar['admission_control'] is defined -%}
|
||||
{% set admission_control = "--admission-control=" + pillar['admission_control'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set runtime_config = "" -%}
|
||||
{% if grains.runtime_config is defined -%}
|
||||
{% set runtime_config = "--runtime-config=" + grains.runtime_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set feature_gates = "" -%}
|
||||
{% if grains.feature_gates is defined -%}
|
||||
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set log_level = pillar['log_level'] -%}
|
||||
{% if pillar['api_server_test_log_level'] is defined -%}
|
||||
{% set log_level = pillar['api_server_test_log_level'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set enable_garbage_collector = "" -%}
|
||||
{% if pillar['enable_garbage_collector'] is defined -%}
|
||||
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set etcd_quorum_read = "" %}
|
||||
{% if pillar['etcd_quorum_read'] is defined -%}
|
||||
{% set etcd_quorum_read = "--etcd_quorum_read=" + pillar['etcd_quorum_read'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector + " " + etcd_quorum_read -%}
|
||||
{% set params = params + " " + cert_file + " " + key_file + " " + kubelet_cert_file + " " + kubelet_key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config + image_review_config -%}
|
||||
|
||||
# test_args has to be kept at the end, so they'll overwrite any prior configuration
|
||||
{% if pillar['apiserver_test_args'] is defined -%}
|
||||
{% set params = params + " " + pillar['apiserver_test_args'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name":"kube-apiserver",
|
||||
"namespace": "kube-system",
|
||||
"labels": {
|
||||
"tier": "control-plane",
|
||||
"component": "kube-apiserver"
|
||||
}
|
||||
},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-apiserver",
|
||||
"image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
|
||||
"resources": {
|
||||
"requests": {
|
||||
"cpu": "250m"
|
||||
}
|
||||
},
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 8080,
|
||||
"path": "/healthz"
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 15
|
||||
},
|
||||
"ports":[
|
||||
{ "name": "https",
|
||||
"containerPort": {{secure_port}},
|
||||
"hostPort": {{secure_port}}},{
|
||||
"name": "local",
|
||||
"containerPort": 8080,
|
||||
"hostPort": 8080}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{{cloud_config_mount}}
|
||||
{{additional_cloud_config_mount}}
|
||||
{{webhook_config_mount}}
|
||||
{{webhook_authn_config_mount}}
|
||||
{{admission_controller_config_mount}}
|
||||
{{image_policy_webhook_config_mount}}
|
||||
{ "name": "srvkube",
|
||||
"mountPath": "{{srv_kube_path}}",
|
||||
"readOnly": true},
|
||||
{ "name": "logfile",
|
||||
"mountPath": "/var/log/kube-apiserver.log",
|
||||
"readOnly": false},
|
||||
{ "name": "etcssl",
|
||||
"mountPath": "/etc/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrsharecacerts",
|
||||
"mountPath": "/usr/share/ca-certificates",
|
||||
"readOnly": true},
|
||||
{ "name": "varssl",
|
||||
"mountPath": "/var/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcopenssl",
|
||||
"mountPath": "/etc/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcpki",
|
||||
"mountPath": "/etc/pki",
|
||||
"readOnly": true},
|
||||
{ "name": "srvsshproxy",
|
||||
"mountPath": "{{srv_sshproxy_path}}",
|
||||
"readOnly": false}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{{cloud_config_volume}}
|
||||
{{additional_cloud_config_volume}}
|
||||
{{webhook_config_volume}}
|
||||
{{webhook_authn_config_volume}}
|
||||
{{admission_controller_config_volume}}
|
||||
{{image_policy_webhook_config_volume}}
|
||||
{ "name": "srvkube",
|
||||
"hostPath": {
|
||||
"path": "{{srv_kube_path}}"}
|
||||
},
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-apiserver.log"}
|
||||
},
|
||||
{ "name": "etcssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/ssl"}
|
||||
},
|
||||
{ "name": "usrsharecacerts",
|
||||
"hostPath": {
|
||||
"path": "/usr/share/ca-certificates"}
|
||||
},
|
||||
{ "name": "varssl",
|
||||
"hostPath": {
|
||||
"path": "/var/ssl"}
|
||||
},
|
||||
{ "name": "etcopenssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/openssl"}
|
||||
},
|
||||
{ "name": "etcpki",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki"}
|
||||
},
|
||||
{ "name": "srvsshproxy",
|
||||
"hostPath": {
|
||||
"path": "{{srv_sshproxy_path}}"}
|
||||
}
|
||||
]
|
||||
}}
6
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-client-tools.sls
generated
vendored
Normal file
@@ -0,0 +1,6 @@
/usr/local/bin/kubectl:
  file.managed:
    - source: salt://kube-bins/kubectl
    - user: root
    - group: root
    - mode: 755
30
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-controller-manager/init.sls
generated
vendored
Normal file
@@ -0,0 +1,30 @@
# Copy kube-controller-manager manifest to manifests folder for kubelet.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restart docker or kubelet and kubelet start master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for detail explanation on this very issue.
/etc/kubernetes/manifests/kube-controller-manager.manifest:
  file.managed:
    - source: salt://kube-controller-manager/kube-controller-manager.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

/var/log/kube-controller-manager.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

stop-legacy-kube_controller_manager:
  service.dead:
    - name: kube-controller-manager
    - enable: None
184
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
generated
vendored
Normal file
@@ -0,0 +1,184 @@
{% set cluster_name = "" -%}
|
||||
{% set cluster_cidr = "" -%}
|
||||
{% set allocate_node_cidrs = "" -%}
|
||||
{% set service_cluster_ip_range = "" %}
|
||||
{% set terminated_pod_gc = "" -%}
|
||||
|
||||
|
||||
{% if pillar['instance_prefix'] is defined -%}
|
||||
{% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%}
|
||||
{% endif -%}
|
||||
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
|
||||
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
|
||||
{% endif -%}
|
||||
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
|
||||
{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
|
||||
{% endif -%}
|
||||
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
|
||||
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
|
||||
{% elif pillar['allocate_node_cidrs'] is defined -%}
|
||||
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
|
||||
{% endif -%}
|
||||
{% if pillar['terminated_pod_gc_threshold'] is defined -%}
|
||||
{% set terminated_pod_gc = "--terminated-pod-gc-threshold=" + pillar['terminated_pod_gc_threshold'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set enable_garbage_collector = "" -%}
|
||||
{% if pillar['enable_garbage_collector'] is defined -%}
|
||||
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cloud_provider = "" -%}
|
||||
{% set cloud_config = "" -%}
|
||||
{% set cloud_config_mount = "" -%}
|
||||
{% set cloud_config_volume = "" -%}
|
||||
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
|
||||
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
|
||||
{% set srv_kube_path = "/srv/kubernetes" -%}
|
||||
|
||||
{% if grains.cloud is defined -%}
|
||||
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
|
||||
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
|
||||
{% endif -%}
|
||||
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
|
||||
|
||||
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.cloud in ['openstack'] -%}
|
||||
{% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%}
|
||||
{% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set root_ca_file = "" -%}
|
||||
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
|
||||
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set log_level = pillar['log_level'] -%}
|
||||
{% if pillar['controller_manager_test_log_level'] is defined -%}
|
||||
{% set log_level = pillar['controller_manager_test_log_level'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set feature_gates = "" -%}
|
||||
{% if grains.feature_gates is defined -%}
|
||||
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
|
||||
{% set params = params + " " + feature_gates -%}
|
||||
|
||||
{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
|
||||
{% set params = params + " --enable-hostpath-provisioner" %}
|
||||
{% endif -%}
|
||||
|
||||
# test_args has to be kept at the end, so they'll overwrite any prior configuration
|
||||
{% if pillar['controller_manager_test_args'] is defined -%}
|
||||
{% set params = params + " " + pillar['controller_manager_test_args'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name":"kube-controller-manager",
|
||||
"namespace": "kube-system",
|
||||
"labels": {
|
||||
"tier": "control-plane",
|
||||
"component": "kube-controller-manager"
|
||||
}
|
||||
},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-controller-manager",
|
||||
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
|
||||
"resources": {
|
||||
"requests": {
|
||||
"cpu": "200m"
|
||||
}
|
||||
},
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 10252,
|
||||
"path": "/healthz"
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 15
|
||||
},
|
||||
"volumeMounts": [
|
||||
{{cloud_config_mount}}
|
||||
{{additional_cloud_config_mount}}
|
||||
{ "name": "srvkube",
|
||||
"mountPath": "{{srv_kube_path}}",
|
||||
"readOnly": true},
|
||||
{ "name": "logfile",
|
||||
"mountPath": "/var/log/kube-controller-manager.log",
|
||||
"readOnly": false},
|
||||
{ "name": "etcssl",
|
||||
"mountPath": "/etc/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrsharecacerts",
|
||||
"mountPath": "/usr/share/ca-certificates",
|
||||
"readOnly": true},
|
||||
{ "name": "varssl",
|
||||
"mountPath": "/var/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcopenssl",
|
||||
"mountPath": "/etc/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcpki",
|
||||
"mountPath": "/etc/pki",
|
||||
"readOnly": true}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{{cloud_config_volume}}
|
||||
{{additional_cloud_config_volume}}
|
||||
{ "name": "srvkube",
|
||||
"hostPath": {
|
||||
"path": "{{srv_kube_path}}"}
|
||||
},
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-controller-manager.log"}
|
||||
},
|
||||
{ "name": "etcssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/ssl"}
|
||||
},
|
||||
{ "name": "usrsharecacerts",
|
||||
"hostPath": {
|
||||
"path": "/usr/share/ca-certificates"}
|
||||
},
|
||||
{ "name": "varssl",
|
||||
"hostPath": {
|
||||
"path": "/var/ssl"}
|
||||
},
|
||||
{ "name": "etcopenssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/openssl"}
|
||||
},
|
||||
{ "name": "etcpki",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki"}
|
||||
}
|
||||
]
|
||||
}}
|
52
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-master-addons/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,52 @@
|
|||
/etc/kubernetes/kube-master-addons.sh:
|
||||
file.managed:
|
||||
- source: salt://kube-master-addons/kube-master-addons.sh
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
# Used to restart kube-master-addons service each time salt is run
|
||||
# Actually, it does not work (the service is not restarted),
|
||||
# but the master-addons service always terminates after it does its job,
|
||||
# so it is (usually) not running and it will be started when
|
||||
# salt is run.
|
||||
# This salt state is not removed because there is a risk
|
||||
# of introducing a regression in 1.0. Please remove it afterwards.
|
||||
# See also the salt config for kube-addons to see how to restart
|
||||
# a service on demand.
|
||||
master-docker-image-tags:
|
||||
file.touch:
|
||||
- name: /srv/pillar/docker-images.sls
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/kube-master-addons.service:
|
||||
file.managed:
|
||||
- source: salt://kube-master-addons/kube-master-addons.service
|
||||
- user: root
|
||||
- group: root
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce kube-master-addons
|
||||
- watch:
|
||||
- file: master-docker-image-tags
|
||||
- file: /etc/kubernetes/kube-master-addons.sh
|
||||
- file: {{ pillar.get('systemd_system_path') }}/kube-master-addons.service
|
||||
|
||||
{% else %}
|
||||
|
||||
/etc/init.d/kube-master-addons:
|
||||
file.managed:
|
||||
- source: salt://kube-master-addons/initd
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
kube-master-addons:
|
||||
service.running:
|
||||
- enable: True
|
||||
- restart: True
|
||||
- watch:
|
||||
- file: master-docker-image-tags
|
||||
- file: /etc/kubernetes/kube-master-addons.sh
|
||||
|
||||
{% endif %}
|
95
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-master-addons/initd
generated
vendored
Normal file
|
@@ -0,0 +1,95 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-master-addons
|
||||
# Required-Start: $local_fs $network $syslog docker
|
||||
# Required-Stop:
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Kubernetes Master Addon Object Manager
|
||||
# Description:
|
||||
# Enforces installation of Kubernetes Master Addon Objects
|
||||
### END INIT INFO
|
||||
|
||||
|
||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
||||
DESC="Kubernetes Master Addon Object Manager"
|
||||
NAME=kube-master-addons
|
||||
DAEMON_LOG_FILE=/var/log/$NAME.log
|
||||
PIDFILE=/var/run/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/$NAME
|
||||
KUBE_MASTER_ADDONS_SH=/etc/kubernetes/kube-master-addons.sh
|
||||
|
||||
# Define LSB log_* functions.
|
||||
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
|
||||
# and status_of_proc is working.
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
|
||||
|
||||
|
||||
#
|
||||
# Function that starts the daemon/service
|
||||
#
|
||||
do_start()
|
||||
{
|
||||
${KUBE_MASTER_ADDONS_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
|
||||
echo $! > ${PIDFILE}
|
||||
disown
|
||||
}
|
||||
|
||||
#
|
||||
# Function that stops the daemon/service
|
||||
#
|
||||
do_stop()
|
||||
{
|
||||
kill $(cat ${PIDFILE})
|
||||
rm ${PIDFILE}
|
||||
return
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC" "$NAME"
|
||||
do_start
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 || exit 0 ;;
|
||||
2) log_end_msg 1 || exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
stop)
|
||||
log_daemon_msg "Stopping $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p $PIDFILE $KUBE_MASTER_ADDONS_SH $NAME
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1)
|
||||
do_start
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
1) log_end_msg 1 ;; # Old process is still running
|
||||
*) log_end_msg 1 ;; # Failed to start
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
# Failed to stop
|
||||
log_end_msg 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
9
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-master-addons/kube-master-addons.service
generated
vendored
Normal file
|
@@ -0,0 +1,9 @@
|
|||
[Unit]
|
||||
Description=Kubernetes-Master Addon Object Manager
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
||||
[Service]
|
||||
ExecStart=/etc/kubernetes/kube-master-addons.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
90
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-master-addons/kube-master-addons.sh
generated
vendored
Executable file
|
@@ -0,0 +1,90 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
|
||||
|
||||
function load-docker-images() {
|
||||
let loadedImageFlags=0
|
||||
|
||||
while true; do
|
||||
restart_docker=false
|
||||
|
||||
if which docker 1>/dev/null 2>&1; then
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|1"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|2"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|4"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
fi
|
||||
|
||||
# All required docker images have been loaded; exit the while loop.
|
||||
if [[ $loadedImageFlags == 7 ]]; then break; fi
|
||||
|
||||
# Sometimes docker load hangs; restarting the docker daemon resolves the issue.
|
||||
if [[ "${restart_docker}" == "true" ]]; then
|
||||
if ! service docker restart; then # Try systemctl if there's no service command.
|
||||
systemctl restart docker
|
||||
fi
|
||||
fi
|
||||
|
||||
# sleep for 15 seconds before attempting to load docker images again
|
||||
sleep 15
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
function convert-rkt-image() {
|
||||
(cd /tmp; ${DOCKER2ACI_BIN} $1)
|
||||
}
|
||||
|
||||
function load-rkt-images() {
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-apiserver.tar
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-scheduler.tar
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-controller-manager.tar
|
||||
|
||||
# Currently, we can't run docker image tarballs directly,
|
||||
# So we use 'rkt fetch' to load the docker images into rkt image stores.
|
||||
# see https://github.com/coreos/rkt/issues/2392.
|
||||
${RKT_BIN} fetch /tmp/*.aci --insecure-options=image
|
||||
}
|
||||
|
||||
if [[ "${KUBERNETES_CONTAINER_RUNTIME}" == "rkt" ]]; then
|
||||
load-rkt-images
|
||||
else
|
||||
load-docker-images
|
||||
fi
|
||||
|
||||
# Now exit. After kube-push, salt will notice that the service is down and it
|
||||
# will start it and new docker images will be loaded.
|
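Note on the script above: loadedImageFlags is a plain bitmask, with one bit per master image (1 = kube-apiserver, 2 = kube-scheduler, 4 = kube-controller-manager), and the loop only exits once all three bits are set (value 7). A minimal bash sketch of that bookkeeping, for illustration only and separate from the vendored script:

# Illustration of the bitmask used by load-docker-images (hypothetical standalone snippet).
let flags=0
let flags="$flags|1"   # kube-apiserver tarball loaded
let flags="$flags|2"   # kube-scheduler tarball loaded
let flags="$flags|4"   # kube-controller-manager tarball loaded
if [[ $flags == 7 ]]; then
  echo "all three master images loaded"
fi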
71
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-node-unpacker/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,71 @@
|
|||
/etc/kubernetes/kube-node-unpacker.sh:
|
||||
file.managed:
|
||||
- source: salt://kube-node-unpacker/kube-node-unpacker.sh
|
||||
- makedirs: True
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{% if grains.cloud is defined and grains.cloud == 'gce' %}
|
||||
node-docker-image-tags:
|
||||
file.touch:
|
||||
- name: /srv/pillar/docker-images.sls
|
||||
{% else %}
|
||||
kube-proxy-tar:
|
||||
file.managed:
|
||||
- name: /srv/salt/kube-bins/kube-proxy.tar
|
||||
- source: salt://kube-bins/kube-proxy.tar
|
||||
- makedirs: True
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
{% endif %}
|
||||
|
||||
{% set is_helium = '0' %}
|
||||
# Super annoying, the salt version on GCE is old enough that 'salt.cmd.run'
|
||||
# isn't supported
|
||||
{% if grains.cloud is defined and grains.cloud == 'aws' %}
|
||||
# Salt has terrible problems with systemd on AWS too
|
||||
{% set is_helium = '0' %}
|
||||
{% endif %}
|
||||
# Salt Helium doesn't support systemd modules for running services
|
||||
{% if pillar.get('is_systemd') and is_helium == '0' %}
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service:
|
||||
file.managed:
|
||||
- source: salt://kube-node-unpacker/kube-node-unpacker.service
|
||||
- user: root
|
||||
- group: root
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce kube-node-unpacker
|
||||
- watch:
|
||||
{% if grains.cloud is defined and grains.cloud == 'gce' %}
|
||||
- file: node-docker-image-tags
|
||||
{% else %}
|
||||
- file: kube-proxy-tar
|
||||
{% endif %}
|
||||
- file: /etc/kubernetes/kube-node-unpacker.sh
|
||||
- file: {{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service
|
||||
|
||||
{% else %}
|
||||
|
||||
/etc/init.d/kube-node-unpacker:
|
||||
file.managed:
|
||||
- source: salt://kube-node-unpacker/initd
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
kube-node-unpacker:
|
||||
service.running:
|
||||
- enable: True
|
||||
- restart: True
|
||||
- watch:
|
||||
{% if grains.cloud is defined and grains.cloud == 'gce' %}
|
||||
- file: node-docker-image-tags
|
||||
{% else %}
|
||||
- file: kube-proxy-tar
|
||||
{% endif %}
|
||||
- file: /etc/kubernetes/kube-node-unpacker.sh
|
||||
|
||||
{% endif %}
|
95
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-node-unpacker/initd
generated
vendored
Executable file
|
@@ -0,0 +1,95 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-node-unpacker
|
||||
# Required-Start: $local_fs $network $syslog docker
|
||||
# Required-Stop:
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Kubernetes Node Unpacker
|
||||
# Description:
|
||||
# Unpacks docker images on Kubernetes nodes
|
||||
### END INIT INFO
|
||||
|
||||
|
||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
||||
DESC="Kubernetes Node Unpacker"
|
||||
NAME=kube-node-unpacker
|
||||
DAEMON_LOG_FILE=/var/log/$NAME.log
|
||||
PIDFILE=/var/run/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/$NAME
|
||||
KUBE_MASTER_ADDONS_SH=/etc/kubernetes/kube-node-unpacker.sh
|
||||
|
||||
# Define LSB log_* functions.
|
||||
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
|
||||
# and status_of_proc is working.
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
|
||||
|
||||
|
||||
#
|
||||
# Function that starts the daemon/service
|
||||
#
|
||||
do_start()
|
||||
{
|
||||
${KUBE_MASTER_ADDONS_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
|
||||
echo $! > ${PIDFILE}
|
||||
disown
|
||||
}
|
||||
|
||||
#
|
||||
# Function that stops the daemon/service
|
||||
#
|
||||
do_stop()
|
||||
{
|
||||
kill $(cat ${PIDFILE})
|
||||
rm ${PIDFILE}
|
||||
return
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC" "$NAME"
|
||||
do_start
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 || exit 0 ;;
|
||||
2) log_end_msg 1 || exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
stop)
|
||||
log_daemon_msg "Stopping $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p $PIDFILE $KUBE_MASTER_ADDONS_SH $NAME
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1)
|
||||
do_start
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
1) log_end_msg 1 ;; # Old process is still running
|
||||
*) log_end_msg 1 ;; # Failed to start
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
# Failed to stop
|
||||
log_end_msg 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
9
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-node-unpacker/kube-node-unpacker.service
generated
vendored
Normal file
|
@@ -0,0 +1,9 @@
|
|||
[Unit]
|
||||
Description=Kubernetes Node Unpacker
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
||||
[Service]
|
||||
ExecStart=/etc/kubernetes/kube-node-unpacker.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
46
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-node-unpacker/kube-node-unpacker.sh
generated
vendored
Executable file
|
@@ -0,0 +1,46 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
|
||||
let loadedImageFlags=0
|
||||
|
||||
while true; do
|
||||
restart_docker=false
|
||||
|
||||
if which docker 1>/dev/null 2>&1; then
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ "${rc}" == 0 ]]; then
|
||||
let loadedImageFlags="${loadedImageFlags}|1"
|
||||
elif [[ "${rc}" == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
fi
|
||||
|
||||
# All required docker images have been loaded; exit the while loop.
|
||||
if [[ "${loadedImageFlags}" == 1 ]]; then break; fi
|
||||
|
||||
# Sometimes docker load hangs; restarting the docker daemon resolves the issue.
|
||||
if [[ "${restart_docker}" ]]; then service docker restart; fi
|
||||
|
||||
# sleep for 15 seconds before attempting to load docker images again
|
||||
sleep 15
|
||||
|
||||
done
|
||||
|
||||
# Now exit. After kube-push, salt will notice that the service is down and it
|
||||
# will start it and new docker images will be loaded.
|
38
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-proxy/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,38 @@
|
|||
/var/lib/kube-proxy/kubeconfig:
|
||||
file.managed:
|
||||
- source: salt://kube-proxy/kubeconfig
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 400
|
||||
- makedirs: true
|
||||
|
||||
# kube-proxy in a static pod
|
||||
/etc/kubernetes/manifests/kube-proxy.manifest:
|
||||
file.managed:
|
||||
- source: salt://kube-proxy/kube-proxy.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- context:
|
||||
# Increasing to 100m to avoid CPU starvation on full nodes.
|
||||
# Any change here should be accompanied by a proportional change in CPU
|
||||
# requests of other per-node add-ons (e.g. fluentd).
|
||||
cpurequest: '100m'
|
||||
- require:
|
||||
- service: docker
|
||||
- service: kubelet
|
||||
|
||||
/var/log/kube-proxy.log:
|
||||
file.managed:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
#stop legacy kube-proxy service
|
||||
stop_kube-proxy:
|
||||
service.dead:
|
||||
- name: kube-proxy
|
||||
- enable: None
|
91
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest
generated
vendored
Normal file
|
@@ -0,0 +1,91 @@
|
|||
{% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
|
||||
{% if grains.api_servers is defined -%}
|
||||
{% set api_servers = "--master=https://" + grains.api_servers -%}
|
||||
{% else -%}
|
||||
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
|
||||
{% set api_servers = "--master=https://" + ips[0][0] -%}
|
||||
{% endif -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %}
|
||||
{% set api_servers_with_port = api_servers -%}
|
||||
{% else -%}
|
||||
{% set api_servers_with_port = api_servers + ":6443" -%}
|
||||
{% endif -%}
|
||||
{% set test_args = "" -%}
|
||||
{% if pillar['kubeproxy_test_args'] is defined -%}
|
||||
{% set test_args=pillar['kubeproxy_test_args'] %}
|
||||
{% endif -%}
|
||||
{% set cluster_cidr = "" -%}
|
||||
{% if pillar['cluster_cidr'] is defined -%}
|
||||
{% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set log_level = pillar['log_level'] -%}
|
||||
{% if pillar['kubeproxy_test_log_level'] is defined -%}
|
||||
{% set log_level = pillar['kubeproxy_test_log_level'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set feature_gates = "" -%}
|
||||
{% if grains.feature_gates is defined -%}
|
||||
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
|
||||
{% endif -%}
|
||||
|
||||
# test_args should always go last to overwrite prior configuration
|
||||
{% set params = log_level + " " + feature_gates + " " + test_args -%}
|
||||
|
||||
# kube-proxy podspec
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-proxy
|
||||
namespace: kube-system
|
||||
# This annotation lowers the possibility that kube-proxy gets evicted when the
|
||||
# node is under memory pressure, and prioritizes it for admission, even if
|
||||
# the node is under memory pressure.
|
||||
# Note that kube-proxy runs as a static pod so this annotation does NOT have
|
||||
# any effect on rescheduler (default scheduler and rescheduler are not
|
||||
# involved in scheduling kube-proxy).
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
labels:
|
||||
tier: node
|
||||
component: kube-proxy
|
||||
spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kube-proxy
|
||||
image: {{pillar['kube_docker_registry']}}/kube-proxy:{{pillar['kube-proxy_docker_tag']}}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ cpurequest }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" {{params}} 1>>/var/log/kube-proxy.log 2>&1
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /etc/ssl/certs
|
||||
name: etc-ssl-certs
|
||||
readOnly: true
|
||||
- mountPath: /usr/share/ca-certificates
|
||||
name: usr-ca-certs
|
||||
readOnly: true
|
||||
- mountPath: /var/log
|
||||
name: varlog
|
||||
readOnly: false
|
||||
- mountPath: /var/lib/kube-proxy/kubeconfig
|
||||
name: kubeconfig
|
||||
readOnly: false
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /usr/share/ca-certificates
|
||||
name: usr-ca-certs
|
||||
- hostPath:
|
||||
path: /etc/ssl/certs
|
||||
name: etc-ssl-certs
|
||||
- hostPath:
|
||||
path: /var/lib/kube-proxy/kubeconfig
|
||||
name: kubeconfig
|
||||
- hostPath:
|
||||
path: /var/log
|
||||
name: varlog
|
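To make the template above concrete, here is a rough, hypothetical rendering of the container command for a cluster where the master IP, cluster CIDR and log level are invented example values (they are not taken from this tree):

kube-proxy --master=https://10.0.0.1 --kubeconfig=/var/lib/kube-proxy/kubeconfig --cluster-cidr=10.244.0.0/16 --resource-container="" --v=2 1>>/var/log/kube-proxy.log 2>&1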
0
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-proxy/kubeconfig
generated
vendored
Normal file
8
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-registry-proxy/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,8 @@
|
|||
/etc/kubernetes/manifests/kube-registry-proxy.yaml:
|
||||
file.managed:
|
||||
- source: salt://kube-registry-proxy/kube-registry-proxy.yaml
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: True
|
||||
- dir_mode: 755
|
35
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml
generated
vendored
Normal file
|
@@ -0,0 +1,35 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-registry-proxy
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-registry
|
||||
kubernetes.io/cluster-service: "true"
|
||||
version: v0.4
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-registry
|
||||
kubernetes.io/name: "kube-registry-proxy"
|
||||
kubernetes.io/cluster-service: "true"
|
||||
version: v0.4
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-registry-proxy
|
||||
image: gcr.io/google_containers/kube-registry-proxy:0.4
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
env:
|
||||
- name: REGISTRY_HOST
|
||||
value: kube-registry.kube-system.svc.cluster.local
|
||||
- name: REGISTRY_PORT
|
||||
value: "5000"
|
||||
ports:
|
||||
- name: registry
|
||||
containerPort: 80
|
||||
hostPort: 5000
|
||||
|
30
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-scheduler/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,30 @@
|
|||
# Copy kube-scheduler manifest to manifests folder for kubelet.
|
||||
# The ordering of salt states for service docker, kubelet and
|
||||
# master-addon below is very important to avoid the race between
|
||||
# salt restarting docker or the kubelet and the kubelet starting the master components.
|
||||
# Please see http://issue.k8s.io/10122#issuecomment-114566063
|
||||
# for a detailed explanation of this very issue.
|
||||
/etc/kubernetes/manifests/kube-scheduler.manifest:
|
||||
file.managed:
|
||||
- source: salt://kube-scheduler/kube-scheduler.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
- require:
|
||||
- service: docker
|
||||
- service: kubelet
|
||||
|
||||
/var/log/kube-scheduler.log:
|
||||
file.managed:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
#stop legacy kube-scheduler service
|
||||
stop_kube-scheduler:
|
||||
service.dead:
|
||||
- name: kube-scheduler
|
||||
- enable: None
|
76
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest
generated
vendored
Normal file
|
@@ -0,0 +1,76 @@
|
|||
{% set params = "" -%}
|
||||
|
||||
{% set log_level = pillar['log_level'] -%}
|
||||
{% if pillar['scheduler_test_log_level'] is defined -%}
|
||||
{% set log_level = pillar['scheduler_test_log_level'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set feature_gates = "" -%}
|
||||
{% if grains.feature_gates is defined -%}
|
||||
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set scheduling_algorithm_provider = "" -%}
|
||||
{% if grains.scheduling_algorithm_provider is defined -%}
|
||||
{% set scheduling_algorithm_provider = "--algorithm-provider=" + grains.scheduling_algorithm_provider -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
|
||||
|
||||
# test_args has to be kept at the end, so they'll overwrite any prior configuration
|
||||
{% if pillar['scheduler_test_args'] is defined -%}
|
||||
{% set params = params + " " + pillar['scheduler_test_args'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name":"kube-scheduler",
|
||||
"namespace": "kube-system",
|
||||
"labels": {
|
||||
"tier": "control-plane",
|
||||
"component": "kube-scheduler"
|
||||
}
|
||||
},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-scheduler",
|
||||
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
|
||||
"resources": {
|
||||
"requests": {
|
||||
"cpu": "100m"
|
||||
}
|
||||
},
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-scheduler --master=127.0.0.1:8080 {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 10251,
|
||||
"path": "/healthz"
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 15
|
||||
},
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "logfile",
|
||||
"mountPath": "/var/log/kube-scheduler.log",
|
||||
"readOnly": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-scheduler.log"}
|
||||
}
|
||||
]
|
||||
}}
|
197
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kubelet/default
generated
vendored
Normal file
|
@@ -0,0 +1,197 @@
|
|||
{% set daemon_args = "$DAEMON_ARGS" -%}
|
||||
{% if grains['os_family'] == 'RedHat' -%}
|
||||
{% set daemon_args = "" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if grains.api_servers is defined -%}
|
||||
{% set api_servers = "--api-servers=https://" + grains.api_servers -%}
|
||||
{% elif grains.apiservers is defined -%} # TODO(remove after 0.16.0): Deprecated form
|
||||
{% set api_servers = "--api-servers=https://" + grains.apiservers -%}
|
||||
{% elif grains['roles'][0] == 'kubernetes-master' -%}
|
||||
{% set master_ipv4 = salt['grains.get']('fqdn_ip4')[0] -%}
|
||||
{% set api_servers = "--api-servers=https://" + master_ipv4 -%}
|
||||
{% else -%}
|
||||
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
|
||||
{% set api_servers = "--api-servers=https://" + ips[0][0] -%}
|
||||
{% endif -%}
|
||||
|
||||
# TODO: remove nginx for other cloud providers.
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
|
||||
{% set api_servers_with_port = api_servers -%}
|
||||
{% else -%}
|
||||
{% set api_servers_with_port = api_servers + ":6443" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set master_kubelet_args = "" %}
|
||||
|
||||
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
|
||||
|
||||
{% if grains['roles'][0] == 'kubernetes-master' -%}
|
||||
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] -%}
|
||||
|
||||
# Unless given a specific directive, disable registration for the kubelet
|
||||
# running on the master.
|
||||
{% if grains.kubelet_api_servers is defined -%}
|
||||
{% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
|
||||
{% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false --register-with-taints=node.alpha.kubernetes.io/ismaster=:NoSchedule" -%}
|
||||
{% else -%}
|
||||
{% set api_servers_with_port = "" -%}
|
||||
{% endif -%}
|
||||
|
||||
# Disable the debugging handlers (/run and /exec) to prevent arbitrary
|
||||
# code execution on the master.
|
||||
# TODO(roberthbailey): Relax this constraint once the master is self-hosted.
|
||||
{% set debugging_handlers = "--enable-debugging-handlers=false" -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cloud_provider = "" -%}
|
||||
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
|
||||
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cloud_config = "" -%}
|
||||
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
|
||||
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set config = "--config=/etc/kubernetes/manifests" -%}
|
||||
|
||||
{% set manifest_url = "" -%}
|
||||
{% set manifest_url_header = "" -%}
|
||||
{% if pillar.get('enable_manifest_url', '').lower() == 'true' %}
|
||||
{% set manifest_url = "--manifest-url=" + pillar['manifest_url'] + " --manifest-url-header=" + pillar['manifest_url_header'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set hostname_override = "" -%}
|
||||
{% if grains.hostname_override is defined -%}
|
||||
{% set hostname_override = " --hostname-override=" + grains.hostname_override -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set cluster_dns = "" %}
|
||||
{% set cluster_domain = "" %}
|
||||
{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
|
||||
{% set cluster_dns = "--cluster-dns=" + pillar['dns_server'] %}
|
||||
{% set cluster_domain = "--cluster-domain=" + pillar['dns_domain'] %}
|
||||
{% endif %}
|
||||
|
||||
{% set docker_root = "" -%}
|
||||
{% if grains.docker_root is defined -%}
|
||||
{% set docker_root = " --docker-root=" + grains.docker_root -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set kubelet_root = "" -%}
|
||||
{% if grains.kubelet_root is defined -%}
|
||||
{% set kubelet_root = " --root-dir=" + grains.kubelet_root -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set non_masquerade_cidr = "" -%}
|
||||
{% if pillar.get('non_masquerade_cidr','') -%}
|
||||
{% set non_masquerade_cidr = "--non-masquerade-cidr=" + pillar.non_masquerade_cidr -%}
|
||||
{% endif -%}
|
||||
|
||||
# Setup cgroups hierarchies.
|
||||
{% set cgroup_root = "" -%}
|
||||
{% set system_container = "" -%}
|
||||
{% set kubelet_container = "" -%}
|
||||
{% set runtime_container = "" -%}
|
||||
{% if grains['os_family'] == 'Debian' -%}
|
||||
{% if pillar.get('is_systemd') %}
|
||||
{% set cgroup_root = "--cgroup-root=docker" -%}
|
||||
{% else %}
|
||||
{% set cgroup_root = "--cgroup-root=/" -%}
|
||||
{% set system_container = "--system-cgroups=/system" -%}
|
||||
{% set runtime_container = "--runtime-cgroups=/docker-daemon" -%}
|
||||
{% set kubelet_container= "--kubelet-cgroups=/kubelet" -%}
|
||||
{% endif %}
|
||||
{% endif -%}
|
||||
{% if grains['oscodename'] in ['vivid','wily'] -%}
|
||||
{% set cgroup_root = "--cgroup-root=docker" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set pod_cidr = "" %}
|
||||
{% if grains['roles'][0] == 'kubernetes-master' %}
|
||||
{% if grains.get('cbr-cidr') %}
|
||||
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
|
||||
{% elif api_servers_with_port == '' and pillar.get('network_provider', '').lower() == 'kubenet' %}
|
||||
# Kubelet standalone mode needs a PodCIDR since there is no controller-manager
|
||||
{% set pod_cidr = "--pod-cidr=10.76.0.0/16" %}
|
||||
{% endif -%}
|
||||
{% endif %}
|
||||
|
||||
{% set cpu_cfs_quota = "" %}
|
||||
{% if pillar['enable_cpu_cfs_quota'] is defined -%}
|
||||
{% set cpu_cfs_quota = "--cpu-cfs-quota=" + pillar['enable_cpu_cfs_quota'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set feature_gates = "" -%}
|
||||
{% if grains['feature_gates'] is defined -%}
|
||||
{% set feature_gates = "--feature-gates=" + grains['feature_gates'] -%}
|
||||
{% endif %}
|
||||
|
||||
{% set test_args = "" -%}
|
||||
{% if pillar['kubelet_test_args'] is defined -%}
|
||||
{% set test_args=pillar['kubelet_test_args'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set network_plugin = "" -%}
|
||||
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
|
||||
{% set network_plugin = "--network-plugin=opencontrail" %}
|
||||
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
|
||||
{% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/" %}
|
||||
{%elif pillar.get('network_policy_provider', '').lower() == 'calico' and grains['roles'][0] != 'kubernetes-master' -%}
|
||||
{% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/" %}
|
||||
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
|
||||
{% set network_plugin = "--network-plugin=kubenet" -%}
|
||||
{% endif -%}
|
||||
|
||||
# Don't pipe the --hairpin-mode flag by default. This allows the kubelet to pick
|
||||
# an appropriate value.
|
||||
{% set hairpin_mode = "" -%}
|
||||
# The master cannot see Services because it doesn't run kube-proxy, so we don't
|
||||
# need to make its container bridge promiscuous. We also don't want to set
|
||||
# the hairpin-veth flag on the master because it increases the chances of
|
||||
# running into the kernel bug described in #20096.
|
||||
{% if grains['roles'][0] == 'kubernetes-master' -%}
|
||||
{% set hairpin_mode = "--hairpin-mode=none" -%}
|
||||
{% elif pillar['hairpin_mode'] is defined and pillar['hairpin_mode'] in ['promiscuous-bridge', 'hairpin-veth', 'none'] -%}
|
||||
{% set hairpin_mode = "--hairpin-mode=" + pillar['hairpin_mode'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set babysit_daemons = "" -%}
|
||||
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce' ] %}
|
||||
{% set babysit_daemons = "--babysit-daemons=true" -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set kubelet_port = "" -%}
|
||||
{% if pillar['kubelet_port'] is defined -%}
|
||||
{% set kubelet_port="--port=" + pillar['kubelet_port'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set log_level = pillar['log_level'] -%}
|
||||
{% if pillar['kubelet_test_log_level'] is defined -%}
|
||||
{% set log_level = pillar['kubelet_test_log_level'] -%}
|
||||
{% endif -%}
|
||||
|
||||
{% set enable_custom_metrics = "" -%}
|
||||
{% if pillar['enable_custom_metrics'] is defined -%}
|
||||
{% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set node_labels = "" %}
|
||||
{% if pillar['node_labels'] is defined -%}
|
||||
{% set node_labels="--node-labels=" + pillar['node_labels'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set eviction_hard = "" %}
|
||||
{% if pillar['eviction_hard'] is defined -%}
|
||||
{% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
|
||||
{% endif -%}
|
||||
|
||||
{% set kubelet_auth_ca_cert = "" %}
|
||||
{% if pillar['kubelet_auth_ca_cert'] is defined -%}
|
||||
{% set kubelet_auth_ca_cert="--anonymous-auth=false --client-ca-file=" + pillar['kubelet_auth_ca_cert'] %}
|
||||
{% endif -%}
|
||||
|
||||
# test_args has to be kept at the end, so they'll overwrite any prior configuration
|
||||
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{kubelet_auth_ca_cert}} {{feature_gates}} {{test_args}}"
|
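For orientation, a hypothetical rendering of the DAEMON_ARGS line above for a GCE node; every concrete value here is invented for illustration, and flags whose template variables expand to empty strings are simply omitted:

DAEMON_ARGS="$DAEMON_ARGS --api-servers=https://10.0.0.1 --enable-debugging-handlers=true --cloud-provider=gce --config=/etc/kubernetes/manifests --allow-privileged=true --v=2 --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --cgroup-root=docker --network-plugin=kubenet --hairpin-mode=promiscuous-bridge --babysit-daemons=true"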
98
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kubelet/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,98 @@
|
|||
{% if pillar.get('is_systemd') %}
|
||||
{% set environment_file = '/etc/sysconfig/kubelet' %}
|
||||
{% else %}
|
||||
{% set environment_file = '/etc/default/kubelet' %}
|
||||
{% endif %}
|
||||
|
||||
{{ environment_file}}:
|
||||
file.managed:
|
||||
- source: salt://kubelet/default
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/usr/local/bin/kubelet:
|
||||
file.managed:
|
||||
- source: salt://kube-bins/kubelet
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
# The default here is that this file is blank. If this is the case, the kubelet
|
||||
# won't be able to parse it as JSON and it will not be able to publish events
|
||||
# to the apiserver. You'll see a single error line in the kubelet start up file
|
||||
# about this.
|
||||
/var/lib/kubelet/kubeconfig:
|
||||
file.managed:
|
||||
- source: salt://kubelet/kubeconfig
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 400
|
||||
- makedirs: true
|
||||
|
||||
{% if pillar['kubelet_auth_ca_cert'] is defined %}
|
||||
/var/lib/kubelet/kubelet_auth_ca.crt:
|
||||
file.managed:
|
||||
- source: salt://kubelet/kubelet_auth_ca.crt
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 400
|
||||
- makedirs: true
|
||||
{% endif %}
|
||||
|
||||
{% if pillar.get('is_systemd') %}
|
||||
|
||||
{{ pillar.get('systemd_system_path') }}/kubelet.service:
|
||||
file.managed:
|
||||
- source: salt://kubelet/kubelet.service
|
||||
- user: root
|
||||
- group: root
|
||||
|
||||
# The service.running block below doesn't work reliably
|
||||
# Instead we run our script which e.g. does a systemd daemon-reload
|
||||
# But we keep the service block below, so it can be used by dependencies
|
||||
# TODO: Fix this
|
||||
fix-service-kubelet:
|
||||
cmd.wait:
|
||||
- name: /opt/kubernetes/helpers/services bounce kubelet
|
||||
- watch:
|
||||
- file: /usr/local/bin/kubelet
|
||||
- file: {{ pillar.get('systemd_system_path') }}/kubelet.service
|
||||
- file: {{ environment_file }}
|
||||
- file: /var/lib/kubelet/kubeconfig
|
||||
- file: /var/lib/kubelet/kubelet_auth_ca.crt
|
||||
|
||||
{% else %}
|
||||
|
||||
/etc/init.d/kubelet:
|
||||
file.managed:
|
||||
- source: salt://kubelet/initd
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
||||
|
||||
{% endif %}
|
||||
|
||||
kubelet:
|
||||
service.running:
|
||||
- enable: True
|
||||
- watch:
|
||||
- file: /usr/local/bin/kubelet
|
||||
{% if pillar.get('is_systemd') %}
|
||||
- file: {{ pillar.get('systemd_system_path') }}/kubelet.service
|
||||
{% else %}
|
||||
- file: /etc/init.d/kubelet
|
||||
{% endif %}
|
||||
{% if grains['os_family'] == 'RedHat' %}
|
||||
- file: /usr/lib/systemd/system/kubelet.service
|
||||
{% endif %}
|
||||
- file: {{ environment_file }}
|
||||
- file: /var/lib/kubelet/kubeconfig
|
||||
{% if pillar['kubelet_auth_ca_cert'] is defined %}
|
||||
- file: /var/lib/kubelet/kubelet_auth_ca.crt
|
||||
{% endif %}
|
||||
{% if pillar.get('is_systemd') %}
|
||||
- provider:
|
||||
- service: systemd
|
||||
{%- endif %}
|
126
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kubelet/initd
generated
vendored
Normal file
|
@@ -0,0 +1,126 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kubelet
|
||||
# Required-Start: $local_fs $network $syslog
|
||||
# Required-Stop:
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: The Kubernetes node container manager
|
||||
# Description:
|
||||
# The Kubernetes container manager maintains docker state against a state file.
|
||||
### END INIT INFO
|
||||
|
||||
|
||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
||||
DESC="The Kubernetes container manager"
|
||||
NAME=kubelet
|
||||
DAEMON=/usr/local/bin/kubelet
|
||||
DAEMON_ARGS=""
|
||||
DAEMON_LOG_FILE=/var/log/$NAME.log
|
||||
PIDFILE=/var/run/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/$NAME
|
||||
DAEMON_USER=root
|
||||
|
||||
# Exit if the package is not installed
|
||||
[ -x "$DAEMON" ] || exit 0
|
||||
|
||||
# Read configuration variable file if it is present
|
||||
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
|
||||
|
||||
# Define LSB log_* functions.
|
||||
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
|
||||
# and status_of_proc is working.
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
#
|
||||
# Function that starts the daemon/service
|
||||
#
|
||||
do_start()
|
||||
{
|
||||
# Avoid a potential race at boot time when both monit and init.d start
|
||||
# the same service
|
||||
PIDS=$(pidof $DAEMON)
|
||||
for PID in ${PIDS}; do
|
||||
kill -9 $PID
|
||||
done
|
||||
|
||||
# Return
|
||||
# 0 if daemon has been started
|
||||
# 1 if daemon was already running
|
||||
# 2 if daemon could not be started
|
||||
start-stop-daemon --start --quiet --background --no-close \
|
||||
--make-pidfile --pidfile $PIDFILE \
|
||||
--exec $DAEMON -c $DAEMON_USER --test > /dev/null \
|
||||
|| return 1
|
||||
start-stop-daemon --start --quiet --background --no-close \
|
||||
--make-pidfile --pidfile $PIDFILE \
|
||||
--exec $DAEMON -c $DAEMON_USER -- \
|
||||
$DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
|
||||
|| return 2
|
||||
}
|
||||
|
||||
#
|
||||
# Function that stops the daemon/service
|
||||
#
|
||||
do_stop()
|
||||
{
|
||||
# Return
|
||||
# 0 if daemon has been stopped
|
||||
# 1 if daemon was already stopped
|
||||
# 2 if daemon could not be stopped
|
||||
# other if a failure occurred
|
||||
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
|
||||
RETVAL="$?"
|
||||
[ "$RETVAL" = 2 ] && return 2
|
||||
# Many daemons don't delete their pidfiles when they exit.
|
||||
rm -f $PIDFILE
|
||||
return "$RETVAL"
|
||||
}
|
||||
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC" "$NAME"
|
||||
do_start
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 || exit 0 ;;
|
||||
2) log_end_msg 1 || exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
stop)
|
||||
log_daemon_msg "Stopping $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) exit 1 ;;
|
||||
esac
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1)
|
||||
do_start
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
1) log_end_msg 1 ;; # Old process is still running
|
||||
*) log_end_msg 1 ;; # Failed to start
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
# Failed to stop
|
||||
log_end_msg 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
0
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kubelet/kubeconfig
generated
vendored
Normal file
14
vendor/k8s.io/kubernetes/cluster/saltbase/salt/kubelet/kubelet.service
generated
vendored
Normal file
|
@@ -0,0 +1,14 @@
|
|||
[Unit]
|
||||
Description=Kubernetes Kubelet Server
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/sysconfig/kubelet
|
||||
ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
|
||||
Restart=always
|
||||
RestartSec=2s
|
||||
StartLimitInterval=0
|
||||
KillMode=process
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
53
vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/glbc.manifest
generated
vendored
Normal file
|
@@ -0,0 +1,53 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: l7-lb-controller-v0.8.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.8.0
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "GLBC"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- image: gcr.io/google_containers/glbc:0.9.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8086
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
# healthz reaches out to GCE
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 15
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
name: l7-lb-controller
|
||||
volumeMounts:
|
||||
- mountPath: /etc/gce.conf
|
||||
name: cloudconfig
|
||||
readOnly: true
|
||||
- mountPath: /var/log/glbc.log
|
||||
name: logfile
|
||||
readOnly: false
|
||||
resources:
|
||||
# Request is set to accommodate this pod alongside the other
|
||||
# master components on a single core master.
|
||||
# TODO: Make resource requirements depend on the size of the cluster
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 50Mi
|
||||
command:
|
||||
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
|
||||
- sh
|
||||
- -c
|
||||
- '/glbc --verbose=true --default-backend-service=kube-system/default-http-backend --sync-period=60s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /etc/gce.conf
|
||||
name: cloudconfig
|
||||
- hostPath:
|
||||
path: /var/log/glbc.log
|
||||
name: logfile
|
17
vendor/k8s.io/kubernetes/cluster/saltbase/salt/l7-gcp/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,17 @@
|
|||
/etc/kubernetes/manifests/glbc.manifest:
|
||||
file.managed:
|
||||
- source: salt://l7-gcp/glbc.manifest
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- makedirs: true
|
||||
- dir_mode: 755
|
||||
|
||||
/var/log/glbc.log:
|
||||
file.managed:
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
|
13
vendor/k8s.io/kubernetes/cluster/saltbase/salt/logrotate/conf
generated
vendored
Normal file
|
@@ -0,0 +1,13 @@
|
|||
/var/log/{{ file }}.log {
|
||||
rotate 5
|
||||
copytruncate
|
||||
missingok
|
||||
notifempty
|
||||
compress
|
||||
maxsize 100M
|
||||
daily
|
||||
dateext
|
||||
dateformat -%Y%m%d-%s
|
||||
create 0644 root root
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/cluster/saltbase/salt/logrotate/cron
generated
vendored
Executable file
|
@@ -0,0 +1,2 @@
|
|||
#!/bin/sh
|
||||
logrotate /etc/logrotate.conf
|
10
vendor/k8s.io/kubernetes/cluster/saltbase/salt/logrotate/docker-containers
generated
vendored
Normal file
|
@@ -0,0 +1,10 @@
|
|||
/var/lib/docker/containers/*/*-json.log {
|
||||
rotate 5
|
||||
copytruncate
|
||||
missingok
|
||||
notifempty
|
||||
compress
|
||||
maxsize 10M
|
||||
daily
|
||||
create 0644 root root
|
||||
}
|
35
vendor/k8s.io/kubernetes/cluster/saltbase/salt/logrotate/init.sls
generated
vendored
Normal file
|
@@ -0,0 +1,35 @@
|
|||
logrotate:
|
||||
pkg:
|
||||
- installed
|
||||
|
||||
{% set logrotate_files = ['kube-scheduler', 'kube-proxy', 'kubelet', 'kube-apiserver', 'kube-controller-manager', 'kube-addons', 'docker'] %}
|
||||
{% for file in logrotate_files %}
|
||||
/etc/logrotate.d/{{ file }}:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://logrotate/conf
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- context:
|
||||
file: {{ file }}
|
||||
{% endfor %}
|
||||
|
||||
/etc/logrotate.d/docker-containers:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://logrotate/docker-containers
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/etc/cron.hourly/logrotate:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://logrotate/cron
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 755
|
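To make the loop above concrete: each name in logrotate_files is rendered through the conf template shown earlier, so, as an illustrative example, /etc/logrotate.d/kubelet would end up containing:

/var/log/kubelet.log {
    rotate 5
    copytruncate
    missingok
    notifempty
    compress
    maxsize 100M
    daily
    dateext
    dateformat -%Y%m%d-%s
    create 0644 root root
}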
64
vendor/k8s.io/kubernetes/cluster/saltbase/salt/nginx/init.sls
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
nginx:
|
||||
pkg:
|
||||
- installed
|
||||
|
||||
/etc/nginx/nginx.conf:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://nginx/nginx.conf
|
||||
- template: jinja
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/etc/nginx/sites-enabled/default:
|
||||
file:
|
||||
- managed
|
||||
- makedirs: true
|
||||
- source: salt://nginx/kubernetes-site
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
/usr/share/nginx/htpasswd:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://nginx/htpasswd
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
|
||||
{% if grains.cloud is defined and grains.cloud in ['gce'] %}
|
||||
/etc/kubernetes/manifests/nginx.json:
|
||||
file:
|
||||
- managed
|
||||
- source: salt://nginx/nginx.json
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 644
|
||||
- require:
|
||||
- file: /etc/nginx/nginx.conf
|
||||
- file: /etc/nginx/sites-enabled/default
|
||||
- file: /usr/share/nginx/htpasswd
|
||||
- cmd: kubernetes-cert
|
||||
|
||||
|
||||
#stop legacy nginx_service
|
||||
stop_nginx-service:
|
||||
service.dead:
|
||||
- name: nginx
|
||||
- enable: None
|
||||
|
||||
{% else %}
|
||||
nginx-service:
|
||||
service:
|
||||
- running
|
||||
- name: nginx
|
||||
- watch:
|
||||
- pkg: nginx
|
||||
- file: /etc/nginx/nginx.conf
|
||||
- file: /etc/nginx/sites-enabled/default
|
||||
- file: /usr/share/nginx/htpasswd
|
||||
- cmd: kubernetes-cert
|
||||
{% endif %}
|
||||
|
66
vendor/k8s.io/kubernetes/cluster/saltbase/salt/nginx/kubernetes-site
generated
vendored
Normal file
@@ -0,0 +1,66 @@
#server {
#listen 80; ## listen for ipv4; this line is default and implied
#listen [::]:80 default_server ipv6only=on; ## listen for ipv6

# root /usr/share/nginx/www;
# index index.html index.htm;

# Make site accessible from http://localhost/
# server_name localhost;
# location / {
# auth_basic "Restricted";
# auth_basic_user_file /usr/share/nginx/htpasswd;

# Proxy settings.
# proxy_pass http://localhost:8080/;
# proxy_connect_timeout 159s;
# proxy_send_timeout 600s;
# proxy_read_timeout 600s;
# proxy_buffer_size 64k;
# proxy_buffers 16 32k;
# proxy_busy_buffers_size 64k;
# proxy_temp_file_write_size 64k;
# }
#}

# HTTPS server
#
server {
    listen 443;
    server_name localhost;

    root html;
    index index.html index.htm;

    ssl on;
    ssl_certificate /srv/kubernetes/server.cert;
    ssl_certificate_key /srv/kubernetes/server.key;

    ssl_session_timeout 5m;

    # don't use SSLv3 because of POODLE
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
    ssl_prefer_server_ciphers on;

    location / {
        auth_basic "Restricted";
        auth_basic_user_file /usr/share/nginx/htpasswd;

        # Proxy settings
        # disable buffering so that watch works
        proxy_buffering off;
        proxy_pass http://127.0.0.1:8080/;
        proxy_connect_timeout 159s;
        proxy_send_timeout 600s;
        proxy_read_timeout 600s;

        # Disable retry
        proxy_next_upstream off;

        # Support web sockets
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
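The site above terminates TLS on port 443, enforces basic auth against /usr/share/nginx/htpasswd, and proxies to the apiserver's insecure port on 127.0.0.1:8080. A rough smoke test from outside the master might look like the following; the user, password, and master address are placeholders, not values defined anywhere in this tree.

    # -k skips verification of the self-signed server.cert; the credentials
    # must match an entry in /usr/share/nginx/htpasswd.
    curl -k -u admin:PASSWORD https://MASTER_IP/version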
61
vendor/k8s.io/kubernetes/cluster/saltbase/salt/nginx/nginx.conf
generated
vendored
Normal file
@@ -0,0 +1,61 @@
{% if grains['os_family'] == 'RedHat' %}
user nginx;
{% else %}
user www-data;
{% endif %}

worker_processes 4;
pid /var/run/nginx.pid;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
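Because the file above is a Jinja template (the user directive depends on grains['os_family']), it only becomes valid nginx syntax after Salt renders it to /etc/nginx/nginx.conf. Once rendered, a configuration check is straightforward; this is a generic nginx invocation, not something shipped in this tree.

    # Validate the rendered configuration without (re)starting nginx.
    nginx -t -c /etc/nginx/nginx.conf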
60
vendor/k8s.io/kubernetes/cluster/saltbase/salt/nginx/nginx.json
generated
vendored
Normal file
@@ -0,0 +1,60 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name":"nginx"},
  "spec":{
    "hostNetwork": true,
    "containers":[
      {
        "name": "nginx",
        "image": "gcr.io/google-containers/nginx:v1",
        "resources": {
          "limits": {
            "cpu": "200m"
          }
        },
        "command": [
          "nginx",
          "-g",
          "daemon off;"
        ],
        "ports":[
          { "name": "https",
            "containerPort": 443,
            "hostPort": 443}
        ],
        "volumeMounts": [
          { "name": "nginx",
            "mountPath": "/etc/nginx",
            "readOnly": true},
          { "name": "k8s",
            "mountPath": "/srv/kubernetes",
            "readOnly": true},
          { "name": "logs",
            "mountPath": "/var/log/nginx",
            "readOnly": false},
          { "name": "passwd",
            "mountPath": "/usr/share/nginx",
            "readOnly": true}
        ]
      }
    ],
    "volumes":[
      { "name": "nginx",
        "hostPath": {
          "path": "/etc/nginx"}
      },
      { "name": "k8s",
        "hostPath": {
          "path": "/srv/kubernetes"}
      },
      { "name": "passwd",
        "hostPath": {
          "path": "/usr/share/nginx"}
      },
      { "name": "logs",
        "hostPath": {
          "path": "/var/logs/nginx"}
      }
    ]
  }}
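On GCE masters this manifest is dropped into /etc/kubernetes/manifests, so the kubelet runs nginx as a static pod on the host network instead of relying on the distro's nginx service (which the init.sls above explicitly stops). A rough way to confirm the static pod came up on the master, assuming Docker as the container runtime:

    # The kubelet should have started the static pod from the manifest directory.
    ls /etc/kubernetes/manifests/nginx.json
    docker ps --filter "name=nginx"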
11
vendor/k8s.io/kubernetes/cluster/saltbase/salt/ntp/init.sls
generated
vendored
Normal file
@@ -0,0 +1,11 @@
ntp:
  pkg:
    - installed

ntp-service:
  service:
    - running
    - name: ntp
    - watch:
      - pkg: ntp
15
vendor/k8s.io/kubernetes/cluster/saltbase/salt/opencontrail-networking-master/init.sls
generated
vendored
Normal file
@@ -0,0 +1,15 @@
opencontrail-networking-master:
  cmd.script:
    - unless: test -f /var/log/contrail/provision_master.log
    - env:
      - 'OPENCONTRAIL_TAG': '{{ pillar.get('opencontrail_tag') }}'
      - 'OPENCONTRAIL_KUBERNETES_TAG': '{{ pillar.get('opencontrail_kubernetes_tag') }}'
      - 'OPENCONTRAIL_PUBLIC_SUBNET': '{{ pillar.get('opencontrail_public_subnet') }}'
      - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
    - source: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/provision_master.sh
    - source_hash: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/manifests.hash
    - cwd: /
    - user: root
    - group: root
    - mode: 755
    - shell: /bin/bash
15
vendor/k8s.io/kubernetes/cluster/saltbase/salt/opencontrail-networking-minion/init.sls
generated
vendored
Normal file
@@ -0,0 +1,15 @@
opencontrail-networking-minion:
  cmd.script:
    - unless: test -f /var/log/contrail/provision_minion.log
    - env:
      - 'OPENCONTRAIL_TAG': '{{ pillar.get('opencontrail_tag') }}'
      - 'OPENCONTRAIL_KUBERNETES_TAG': '{{ pillar.get('opencontrail_kubernetes_tag') }}'
      - 'OPENCONTRAIL_PUBLIC_SUBNET': '{{ pillar.get('opencontrail_public_subnet') }}'
      - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
    - source: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/provision_minion.sh
    - source_hash: https://raw.githubusercontent.com/juniper/contrail-kubernetes/{{ pillar.get('opencontrail_kubernetes_tag') }}/cluster/manifests.hash
    - cwd: /
    - user: root
    - group: root
    - mode: 755
    - shell: /bin/bash
53
vendor/k8s.io/kubernetes/cluster/saltbase/salt/openvpn-client/client.conf
generated
vendored
Normal file
@@ -0,0 +1,53 @@
# Specify that we are a client and that we
# will be pulling certain config file directives
# from the server.
client

# Use the same setting as you are using on
# the server.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun

# Are we connecting to a TCP or
# UDP server? Use the same setting as
# on the server.
proto udp

# The hostname/IP and port of the server.
# You can have multiple remote entries
# to load balance between the servers.
remote {{ salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').keys()[0] }} 1194

# Keep trying indefinitely to resolve the
# host name of the OpenVPN server. Very useful
# on machines which are not permanently connected
# to the internet such as laptops.
resolv-retry infinite

# Most clients don't need to bind to
# a specific local port number.
nobind

# Try to preserve some state across restarts.
persist-key
persist-tun

# SSL/TLS parms.
# See the server config file for more
# description. It's best to use
# a separate .crt/.key file pair
# for each client. A single ca
# file can be used for all clients.
ca /etc/openvpn/ca.crt
cert /etc/openvpn/client.crt
key /etc/openvpn/client.key

# Enable compression on the VPN link.
# Don't enable this unless it is also
# enabled in the server config file.
comp-lzo

# Set log file verbosity.
verb 3
16
vendor/k8s.io/kubernetes/cluster/saltbase/salt/openvpn-client/init.sls
generated
vendored
Normal file
@@ -0,0 +1,16 @@
/etc/openvpn/client.conf:
  file.managed:
    - source: salt://openvpn-client/client.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: True

openvpn:
  pkg:
    - latest
  service.running:
    - enable: True
    - watch:
      - file: /etc/openvpn/client.conf
31
vendor/k8s.io/kubernetes/cluster/saltbase/salt/openvpn/init.sls
generated
vendored
Normal file
@@ -0,0 +1,31 @@
/etc/openvpn/server.conf:
  file.managed:
    - source: salt://openvpn/server.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: True

{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
/etc/openvpn/ccd/{{ minion['hostnamef'] }}:
  file.managed:
    - contents: "iroute {{ minion['cbr-string'] }}\n"
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
{% endfor %}

openssl dhparam -out /etc/openvpn/dh1024.pem 1024:
  cmd.run:
    - creates: /etc/openvpn/dh1024.pem
    - unless: file /etc/openvpn/dh1024.pem

openvpn:
  pkg:
    - latest
  service.running:
    - enable: True
    - watch:
      - file: /etc/openvpn/server.conf
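The Jinja loop above writes one client-config-dir file per node, using the hostnamef and cbr-string values each minion publishes through the Salt mine; the rendered result is a one-line iroute entry. The hostname and subnet below are made-up illustrative values, not anything defined in this tree.

    # Hypothetical rendered file for a node whose container bridge is 10.244.1.0/24:
    #   iroute 10.244.1.0 255.255.255.0
    cat /etc/openvpn/ccd/kubernetes-minion-1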
123
vendor/k8s.io/kubernetes/cluster/saltbase/salt/openvpn/server.conf
generated
vendored
Normal file
@@ -0,0 +1,123 @@
# Which TCP/UDP port should OpenVPN listen on?
# If you want to run multiple OpenVPN instances
# on the same machine, use a different port
# number for each one. You will need to
# open up this port on your firewall.
port 1194

# TCP or UDP server?
proto udp

# "dev tun" will create a routed IP tunnel,
# "dev tap" will create an ethernet tunnel.
# Use "dev tap0" if you are ethernet bridging
# and have precreated a tap0 virtual interface
# and bridged it with your ethernet interface.
# If you want to control access policies
# over the VPN, you must create firewall
# rules for the the TUN/TAP interface.
# On non-Windows systems, you can give
# an explicit unit number, such as tun0.
# On Windows, use "dev-node" for this.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun

# SSL/TLS root certificate (ca), certificate
# (cert), and private key (key). Each client
# and the server must have their own cert and
# key file. The server and all clients will
# use the same ca file.
#
# See the "easy-rsa" directory for a series
# of scripts for generating RSA certificates
# and private keys. Remember to use
# a unique Common Name for the server
# and each of the client certificates.
#
# Any X509 key management system can be used.
# OpenVPN can also use a PKCS #12 formatted key file
# (see "pkcs12" directive in man page).
ca /etc/openvpn/ca.crt
cert /etc/openvpn/server.crt
key /etc/openvpn/server.key # This file should be kept secret

# Diffie hellman parameters.
# Generate your own with:
# openssl dhparam -out dh1024.pem 1024
# Substitute 2048 for 1024 if you are using
# 2048 bit keys.
dh /etc/openvpn/dh1024.pem

# Configure server mode and supply a VPN subnet
# for OpenVPN to draw client addresses from.
# The server will take 10.8.0.1 for itself,
# the rest will be made available to clients.
# Each client will be able to reach the server
# on 10.8.0.1. Comment this line out if you are
# ethernet bridging. See the man page for more info.
server 10.8.0.0 255.255.255.0

# Maintain a record of client <-> virtual IP address
# associations in this file. If OpenVPN goes down or
# is restarted, reconnecting clients can be assigned
# the same virtual IP address from the pool that was
# previously assigned.
ifconfig-pool-persist ipp.txt

# To assign specific IP addresses to specific
# clients or if a connecting client has a private
# subnet behind it that should also have VPN access,
# use the subdirectory "ccd" for client-specific
# configuration files (see man page for more info).

client-config-dir /etc/openvpn/ccd

{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
push "route {{ minion['cbr-string'] }}"
route {{ minion['cbr-string'] }}
{% endfor %}

# Uncomment this directive to allow different
# clients to be able to "see" each other.
# By default, clients will only see the server.
# To force clients to only see the server, you
# will also need to appropriately firewall the
# server's TUN/TAP interface.
client-to-client

# The keepalive directive causes ping-like
# messages to be sent back and forth over
# the link so that each side knows when
# the other side has gone down.
# Ping every 10 seconds, assume that remote
# peer is down if no ping received during
# a 120 second time period.
keepalive 10 120

# Enable compression on the VPN link.
# If you enable it here, you must also
# enable it in the client config file.
comp-lzo

# The persist options will try to avoid
# accessing certain resources on restart
# that may no longer be accessible because
# of the privilege downgrade.
persist-key
persist-tun

# Output a short status file showing
# current connections, truncated
# and rewritten every minute.
status openvpn-status.log

# Set the appropriate level of log
# file verbosity.
#
# 0 is silent, except for fatal errors
# 4 is reasonable for general usage
# 5 and 6 can help to debug connection problems
# 9 is extremely verbose
verb 3
15
vendor/k8s.io/kubernetes/cluster/saltbase/salt/rescheduler/init.sls
generated
vendored
Normal file
@@ -0,0 +1,15 @@
/etc/kubernetes/manifests/rescheduler.manifest:
  file.managed:
    - source: salt://rescheduler/rescheduler.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755

/var/log/rescheduler.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644
33
vendor/k8s.io/kubernetes/cluster/saltbase/salt/rescheduler/rescheduler.manifest
generated
vendored
Normal file
@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
  name: rescheduler-v0.2.1
  namespace: kube-system
  labels:
    k8s-app: rescheduler
    version: v0.2.1
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Rescheduler"
spec:
  hostNetwork: true
  containers:
  - image: gcr.io/google_containers/rescheduler:v0.2.1
    name: rescheduler
    volumeMounts:
    - mountPath: /var/log/rescheduler.log
      name: logfile
      readOnly: false
    # TODO: Make resource requirements depend on the size of the cluster
    resources:
      requests:
        cpu: 10m
        memory: 100Mi
    command:
    # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
    - sh
    - -c
    - '/rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
  volumes:
  - hostPath:
      path: /var/log/rescheduler.log
    name: logfile
24
vendor/k8s.io/kubernetes/cluster/saltbase/salt/salt-helpers/init.sls
generated
vendored
Normal file
@@ -0,0 +1,24 @@
/opt/kubernetes/helpers:
  file.directory:
    - user: root
    - group: root
    - makedirs: True
    - dir_mode: 755

{% if pillar.get('is_systemd') %}
/opt/kubernetes/helpers/services:
  file.managed:
    - source: salt://salt-helpers/services
    - user: root
    - group: root
    - mode: 755
{% endif %}

{% if grains.get('os_family', '') == 'Debian' -%}
/opt/kubernetes/helpers/pkg:
  file.managed:
    - source: salt://salt-helpers/pkg-apt
    - user: root
    - group: root
    - mode: 755
{% endif %}
70
vendor/k8s.io/kubernetes/cluster/saltbase/salt/salt-helpers/pkg-apt
generated
vendored
Normal file
@@ -0,0 +1,70 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Helper script that installs a package, wrapping it with a policy that
# means we won't try to start services.
set -o errexit
set -o nounset
set -o pipefail

ACTION=${1}
NAME=${2}
VERSION=${3}
SRC=${4}

if [[ -z "${ACTION}" || -z "${NAME}" || -z "${VERSION}" || -z "${SRC}" ]]; then
  echo "Syntax: ${0} <action> <name> <version> <src>"
  exit 1
fi

old_policy=""

function install_no_start {
  # Query the existing installed version, assuming that an error means package not found
  existing=`dpkg-query -W -f='${Version}' ${NAME} 2>/dev/null || echo ""`
  if [[ -n "${existing}" ]]; then
    if [[ "${existing}" == "${VERSION}" ]]; then
      return
    fi
    echo "Different version of package ${NAME} installed: ${VERSION} vs ${existing}"
  fi

  if [[ -e "/usr/sbin/policy-rc.d" ]]; then
    tmpfile=`mktemp`
    mv /usr/sbin/policy-rc.d ${tmpfile}
    old_policy=${tmpfile}
  fi
  trap cleanup EXIT
  echo -e '#!/bin/sh\nexit 101' > /usr/sbin/policy-rc.d
  chmod 755 /usr/sbin/policy-rc.d

  echo "Installing package ${NAME} from ${SRC}"
  dpkg --install ${SRC}
}

function cleanup {
  rm -f /usr/sbin/policy-rc.d
  if [[ -n "${old_policy}" ]]; then
    mv ${old_policy} /usr/sbin/policy-rc.d
  fi
}

if [[ "${ACTION}" == "install-no-start" ]]; then
  install_no_start
else
  echo "Unknown action: ${ACTION}"
  exit 1
fi
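The helper takes a fixed argument order of action, package name, version, and path to a local .deb, and the only supported action is install-no-start. A hypothetical invocation might look like this; the package name, version, and path are placeholders, not values used elsewhere in this tree.

    # Install a locally staged .deb without letting its postinst start the service.
    /opt/kubernetes/helpers/pkg install-no-start docker-engine 1.11.2-0 /tmp/docker-engine.deb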
72
vendor/k8s.io/kubernetes/cluster/saltbase/salt/salt-helpers/services
generated
vendored
Normal file
@@ -0,0 +1,72 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

ACTION=${1}
SERVICE=${2}

if [[ -z "${ACTION}" || -z "${SERVICE}" ]]; then
  echo "Syntax: ${0} <action> <service>"
  exit 1
fi


function reload_state() {
  systemctl daemon-reload
}

function start_service() {
  systemctl start ${SERVICE}
}

function stop_service() {
  systemctl stop ${SERVICE}
}

function enable_service() {
  systemctl enable ${SERVICE}
}

function disable_service() {
  systemctl disable ${SERVICE}
}

function restart_service() {
  systemctl restart ${SERVICE}
}

if [[ "${ACTION}" == "up" ]]; then
  reload_state
  enable_service
  start_service
elif [[ "${ACTION}" == "bounce" ]]; then
  reload_state
  enable_service
  restart_service
elif [[ "${ACTION}" == "down" ]]; then
  reload_state
  disable_service
  stop_service
elif [[ "${ACTION}" == "enable" ]]; then
  reload_state
  enable_service
else
  echo "Unknown action: ${ACTION}"
  exit 1
fi
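The services helper is only installed on systemd hosts (see salt-helpers/init.sls above) and wraps daemon-reload plus the enable/start/stop/restart calls behind four actions: up, bounce, down, and enable. A typical call might look like the following; the unit name is just an illustration.

    # Reload unit files, enable the service, and restart it in one step.
    /opt/kubernetes/helpers/services bounce kubelet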
87
vendor/k8s.io/kubernetes/cluster/saltbase/salt/supervisor/docker-checker.sh
generated
vendored
Executable file
@@ -0,0 +1,87 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is intended to start the docker and then loop until
# it detects a failure. It then exits, and supervisord restarts it
# which in turn restarts docker.

main() {
  if ! healthy 60; then
    stop_docker
    start_docker
    echo "waiting 30s for startup"
    sleep 30
    healthy 60
  fi

  while healthy; do
    sleep 10
  done

  echo "Docker failed!"
  exit 2
}

# Performs health check on docker. If a parameter is passed, it is treated as
# the number of seconds to keep trying for a healthy result. If none is passed
# we make only one attempt.
healthy() {
  max_retry_sec="$1"
  shift

  starttime=$(date +%s)
  while ! timeout 60 docker ps > /dev/null; do
    if [[ -z "$max_retry_sec" || $(( $(date +%s) - starttime )) -gt "$max_retry_sec" ]]; then
      echo "docker ps did not succeed"
      return 2
    else
      echo "waiting 5s before retry"
      sleep 5
    fi
  done
  echo "docker is healthy"
  return 0
}

stop_docker() {
  /etc/init.d/docker stop
  # Make sure docker gracefully terminated before start again
  starttime=`date +%s`
  while pidof docker > /dev/null; do
    currenttime=`date +%s`
    ((elapsedtime = currenttime - starttime))
    # after 60 seconds, forcefully terminate docker process
    if test $elapsedtime -gt 60; then
      echo "attempting to kill docker process with sigkill signal"
      kill -9 `pidof docker` || sleep 10
    else
      echo "waiting clean shutdown"
      sleep 10
    fi
  done
}

start_docker() {
  echo "docker is not running. starting docker"

  # cleanup docker network checkpoint to avoid running into known issue
  # of docker (https://github.com/docker/docker/issues/18283)
  rm -rf /var/lib/docker/network

  /etc/init.d/docker start
}

main
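The checker's health probe is simply "timeout 60 docker ps"; when the loop in main() sees it fail, the script exits so the supervising process can restart both the checker and docker. The same probe can be run by hand to see what the script sees; this one-liner is a sketch, not part of the vendored file.

    # Mirror the script's health check: succeed quietly, or report the failure.
    timeout 60 docker ps > /dev/null && echo "docker is healthy" || echo "docker ps did not succeed"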
Some files were not shown because too many files have changed in this diff.