Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
This commit is contained in:
Michał Żyłowski 2017-02-03 14:41:32 +01:00
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions

87
vendor/k8s.io/kubernetes/cluster/saltbase/BUILD generated vendored Normal file
View file

@ -0,0 +1,87 @@
# Load the pkg_tar rule used by the tarball targets below.
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])
# All files in this package, gathered for the repo-wide source aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
# Public alias over :package-srcs, consumed by the parent package's all-srcs.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
# TODO(#3579): This is a temporary hack. It gathers up the yaml,
# yaml.in, json files in cluster/addons (minus any demos) and overlays
# them into kube-addons, where we expect them.
# These files are expected in a salt/kube-addons subdirectory.
pkg_tar(
    name = "_salt_kube-addons",
    package_dir = "salt/kube-addons",
    strip_prefix = "/cluster/addons",
    visibility = ["//visibility:private"],
    deps = [
        "//cluster/addons",
    ],
)
# The full saltbase tree (minus this BUILD file), packaged under
# kubernetes/saltbase/ with the kube-addons overlay merged in.
pkg_tar(
    name = "salt",
    files = glob(
        ["**"],
        exclude = ["BUILD"],
    ),
    # Files default to 0644; install.sh must remain executable.
    mode = "0644",
    modes = {
        "install.sh": "0755",
    },
    package_dir = "kubernetes/saltbase",
    strip_prefix = ".",
    deps = [
        ":_salt_kube-addons",
    ],
)
# The following are used in the kubernetes salt tarball.
pkg_tar(
    name = "salt-manifests",
    files = [
        "salt/fluentd-gcp/fluentd-gcp.yaml",
        "salt/kube-proxy/kube-proxy.manifest",
        "salt/kube-registry-proxy/kube-registry-proxy.yaml",
    ],
    mode = "0644",
)
# Admission-control manifests, kept private and folded into
# :gci-trusty-salt-manifests below.
pkg_tar(
    name = "_kube-admission-controls",
    files = glob(["salt/kube-admission-controls/limit-range/**"]),
    mode = "0644",
    # Maintain limit-range/ subdirectory in tarball
    strip_prefix = "./salt/kube-admission-controls/",
    visibility = ["//visibility:private"],
)
# Static pod / addon manifests shipped to GCI and Trusty nodes.
pkg_tar(
    name = "gci-trusty-salt-manifests",
    files = [
        "salt/cluster-autoscaler/cluster-autoscaler.manifest",
        "salt/e2e-image-puller/e2e-image-puller.manifest",
        "salt/etcd/etcd.manifest",
        "salt/kube-addons/kube-addon-manager.yaml",
        "salt/kube-apiserver/abac-authz-policy.jsonl",
        "salt/kube-apiserver/kube-apiserver.manifest",
        "salt/kube-controller-manager/kube-controller-manager.manifest",
        "salt/kube-scheduler/kube-scheduler.manifest",
        "salt/l7-gcp/glbc.manifest",
        "salt/rescheduler/rescheduler.manifest",
    ],
    mode = "0644",
    deps = [
        # Same-package labels use the ":" prefix, matching ":_salt_kube-addons"
        # in the :salt target above and standard Bazel label style.
        ":_kube-admission-controls",
    ],
)

View file

@ -1,3 +1,6 @@
assignees:
- Crassirostris
- piosz
approvers:
- Crassirostris
- piosz
reviewers:
- Crassirostris
- piosz

View file

@ -10,7 +10,7 @@ spec:
dnsPolicy: Default
containers:
- name: fluentd-cloud-logging
image: gcr.io/google_containers/fluentd-gcp:1.32
image: gcr.io/google_containers/fluentd-gcp:1.34
# If fluentd consumes its own logs, the following situation may happen:
# fluentd fails to send a chunk to the server => writes it to the log =>
# tries to send this message to the server => fails to send a chunk and so on.
@ -34,8 +34,6 @@ spec:
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: libsystemddir
mountPath: /host/lib
# Liveness probe is aimed to help in situations where fluentd
# silently hangs for no apparent reasons until manual restart.
# The idea of this probe is that if fluentd is not queueing or
@ -51,10 +49,23 @@ spec:
- '/bin/sh'
- '-c'
- >
LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-600};
LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
STUCK_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-900};
if [ ! -e /var/log/fluentd-buffers ];
then
exit 1;
fi;
LAST_MODIFIED_DATE=`stat /var/log/fluentd-buffers | grep Modify | sed -r "s/Modify: (.*)/\1/"`;
LAST_MODIFIED_TIMESTAMP=`date -d "$LAST_MODIFIED_DATE" +%s`;
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $LIVENESS_THRESHOLD_SECONDS` ]; then exit 1; fi;
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $STUCK_THRESHOLD_SECONDS` ];
then
rm -rf /var/log/fluentd-buffers;
exit 1;
fi;
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $LIVENESS_THRESHOLD_SECONDS` ];
then
exit 1;
fi;
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
@ -63,6 +74,3 @@ spec:
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: libsystemddir
hostPath:
path: /usr/lib64

View file

@ -99,7 +99,11 @@ else
cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1
cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1
fi
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1
# Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
./easyrsa --dn-mode=org \
--req-cn=kubecfg --req-org=system:masters \
--req-c= --req-st= --req-city= --req-email= --req-ou= \
build-client-full kubecfg nopass > /dev/null 2>&1
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"

View file

@ -22,12 +22,9 @@
FSCK=fsck.ext4
MOUNT_OPTIONS="discard,defaults"
MKFS="mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -F"
MKFS="mkfs.ext4 -F"
if [ -e /etc/redhat-release ]; then
if grep -q '6\..' /etc/redhat-release; then
# lazy_journal_init is not recognized in redhat 6
MKFS="mkfs.ext4 -E lazy_itable_init=0 -F"
elif grep -q '7\..' /etc/redhat-release; then
if grep -q '7\..' /etc/redhat-release; then
FSCK=fsck.xfs
MKFS=mkfs.xfs
fi