Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>

Author: Michał Żyłowski <michal.zylowski@intel.com>
Date:   2017-02-03 14:41:32 +01:00
Commit: 52baf68d50 (parent: dfa93414c5)

3756 changed files with 113013 additions and 92675 deletions

@@ -4,21 +4,14 @@ load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
 filegroup(
     name = "addon-srcs",
-    srcs = glob([
-        "calico-policy-controller/*",
-        "cluster-loadbalancing/*",
-        "cluster-monitoring/*",
-        "dashboard/*",
-        "dns/*",
-        "etcd-empty-dir-cleanup/*",
-        "fluentd-elasticsearch/*",
-        "fluentd-gcp/*",
-        "gci/*",
-        "node-problem-detector/*",
-        "podsecuritypolicies/*",
-        "python-image/*",
-        "registry/*",
-    ]),
+    srcs = glob(
+        [
+            "**/*.json",
+            "**/*.yaml",
+            "**/*.yaml.in",
+        ],
+        exclude = ["**/*demo*/**"],
+    ),
 )

 pkg_tar(
@@ -27,6 +20,7 @@ pkg_tar(
     files = [
         ":addon-srcs",
     ],
+    mode = "0644",
     strip_prefix = ".",
 )

@@ -1,3 +1,6 @@
-assignees:
-- DirectXMan12
-- piosz
+approvers:
+- DirectXMan12
+- piosz
+reviewers:
+- DirectXMan12
+- piosz

@@ -1,3 +1,6 @@
-assignees:
-- bowei
-- mrhohn
+approvers:
+- bowei
+- mrhohn
+reviewers:
+- bowei
+- mrhohn

@@ -1,3 +1,6 @@
-assignees:
-- bowei
-- mrhohn
+approvers:
+- bowei
+- mrhohn
+reviewers:
+- bowei
+- mrhohn

@@ -47,7 +47,7 @@ spec:
     spec:
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.11.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -96,7 +96,7 @@ spec:
           name: metrics
           protocol: TCP
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -124,7 +124,7 @@ spec:
             cpu: 150m
             memory: 10Mi
       - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /metrics

@@ -47,7 +47,7 @@ spec:
     spec:
      containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.11.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -96,7 +96,7 @@ spec:
           name: metrics
           protocol: TCP
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -124,7 +124,7 @@ spec:
             cpu: 150m
             memory: 10Mi
       - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /metrics

@@ -47,7 +47,7 @@ spec:
     spec:
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.11.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -95,7 +95,7 @@ spec:
           name: metrics
           protocol: TCP
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -123,7 +123,7 @@ spec:
             cpu: 150m
             memory: 10Mi
      - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.10.1
+        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.11.0
         livenessProbe:
           httpGet:
             path: /metrics

@@ -1,19 +0,0 @@
-# This is the main user for the e2e tests. This is ok to leave long term
-# since the first user in the test can reasonably be high power
-# its kubecfg in gce
-# TODO consider provisioning each test its namespace and giving it an
-# admin user. This still has to exist, but e2e wouldn't normally use it
-apiVersion: rbac.authorization.k8s.io/v1alpha1
-kind: ClusterRoleBinding
-metadata:
-  name: e2e-user-cluster-admin
-  labels:
-    kubernetes.io/cluster-service: "true"
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- apiVersion: rbac/v1alpha1
-  kind: User
-  name: kubecfg

@@ -1,3 +1,6 @@
-assignees:
-- Crassirostris
-- piosz
+approvers:
+- Crassirostris
+- piosz
+reviewers:
+- Crassirostris
+- piosz

@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:v2.4.1
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1-1
         name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class

@@ -16,7 +16,7 @@
 # The current value of the tag to be used for building and
 # pushing an image to gcr.io
-TAG = v2.4.1
+TAG = v2.4.1-1

 build: elasticsearch_logging_discovery
 	docker build --pull -t gcr.io/google_containers/elasticsearch:$(TAG) .
@@ -1,5 +1,6 @@
 cluster.name: kubernetes-logging

+node.name: ${NODE_NAME}
 node.master: ${NODE_MASTER}
 node.data: ${NODE_DATA}
@@ -24,9 +24,10 @@ import (
 	"time"

 	"github.com/golang/glog"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 )

 func flattenSubsets(subsets []api.EndpointSubset) []string {
@@ -52,10 +53,10 @@ func main() {
 	if err != nil {
 		glog.Fatalf("Failed to make client: %v", err)
 	}
-	namespace := api.NamespaceSystem
+	namespace := meta_v1.NamespaceSystem
 	envNamespace := os.Getenv("NAMESPACE")
 	if envNamespace != "" {
-		if _, err := client.Core().Namespaces().Get(envNamespace); err != nil {
+		if _, err := client.Core().Namespaces().Get(envNamespace, meta_v1.GetOptions{}); err != nil {
 			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
 		}
 		namespace = envNamespace
@@ -65,7 +66,7 @@ func main() {
 	// Look for endpoints associated with the Elasticsearch logging service.
 	// First wait for the service to become available.
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging")
+		elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging", meta_v1.GetOptions{})
 		if err == nil {
 			break
 		}
@@ -82,7 +83,7 @@ func main() {
 	// Wait for some endpoints.
 	count := 0
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging")
+		endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging", meta_v1.GetOptions{})
 		if err != nil {
 			continue
 		}
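For reference, the client API migration in the hunks above follows one consistent pattern: in-cluster configuration moves from k8s.io/kubernetes/pkg/client/restclient to the vendored k8s.io/client-go/rest, and every Get call gains an explicit meta_v1.GetOptions{} argument from the new apimachinery package. The sketch below shows the resulting call shape in isolation. It is a minimal illustration assuming the package versions vendored by this commit (pre-context Get signatures); lookupService and the main wiring are hypothetical, not part of the original file.

// Minimal sketch of the migrated call pattern, mirroring the diff above.
package main

import (
	"fmt"
	"time"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// lookupService (hypothetical helper) polls until the named service appears,
// passing the meta_v1.GetOptions{} argument introduced by the apimachinery split.
func lookupService(client clientset.Interface, namespace, name string) (*api.Service, error) {
	var svc *api.Service
	var err error
	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
		// Old signature: Get(name). New signature: Get(name, meta_v1.GetOptions{}).
		if svc, err = client.Core().Services(namespace).Get(name, meta_v1.GetOptions{}); err == nil {
			return svc, nil
		}
	}
	return nil, err
}

func main() {
	// In-cluster config now comes from the vendored k8s.io/client-go/rest
	// package rather than k8s.io/kubernetes/pkg/client/restclient.
	config, err := restclient.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// meta_v1.NamespaceSystem replaces the old api.NamespaceSystem constant.
	svc, err := lookupService(client, meta_v1.NamespaceSystem, "elasticsearch-logging")
	if err != nil {
		panic(err)
	}
	fmt.Println("found service:", svc.Name)
}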
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+export NODE_NAME=${NODE_NAME:-${HOSTNAME}}
 export NODE_MASTER=${NODE_MASTER:-true}
 export NODE_DATA=${NODE_DATA:-true}
 export HTTP_PORT=${HTTP_PORT:-9200}
@@ -1,3 +1,6 @@
-assignees:
-- Crassirostris
-- piosz
+approvers:
+- Crassirostris
+- piosz
+reviewers:
+- Crassirostris
+- piosz

@@ -2,23 +2,23 @@
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
-  name: fluentd-gcp-v1.31
+  name: fluentd-gcp-v1.34
   namespace: kube-system
   labels:
     k8s-app: fluentd-gcp
     kubernetes.io/cluster-service: "true"
-    version: v1.31
+    version: v1.34
 spec:
   template:
     metadata:
       labels:
         k8s-app: fluentd-gcp
         kubernetes.io/cluster-service: "true"
-        version: v1.31
+        version: v1.34
     spec:
       containers:
       - name: fluentd-gcp
-        image: gcr.io/google_containers/fluentd-gcp:1.32
+        image: gcr.io/google_containers/fluentd-gcp:1.34
         # If fluentd consumes its own logs, the following situation may happen:
         # fluentd fails to send a chunk to the server => writes it to the log =>
         # tries to send this message to the server => fails to send a chunk and so on.
@@ -40,8 +40,6 @@ spec:
         - name: varlibdockercontainers
           mountPath: /var/lib/docker/containers
           readOnly: true
-        - name: libsystemddir
-          mountPath: /host/lib
         # Liveness probe is aimed to help in situations where fluentd
         # silently hangs for no apparent reasons until manual restart.
         # The idea of this probe is that if fluentd is not queueing or
@@ -84,6 +82,3 @@ spec:
       - name: varlibdockercontainers
         hostPath:
           path: /var/lib/docker/containers
-      - name: libsystemddir
-        hostPath:
-          path: /usr/lib64

@@ -36,6 +36,7 @@ RUN apt-get -qq update && \
     td-agent-gem install --no-document fluent-plugin-record-reformer -v 0.8.2 && \
     td-agent-gem install --no-document fluent-plugin-systemd -v 0.0.5 && \
     td-agent-gem install --no-document fluent-plugin-google-cloud -v 0.5.2 && \
+    td-agent-gem install --no-document fluent-plugin-detect-exceptions -v 0.0.4 && \
     # Remove build tools
     apt-get remove -y -qq gcc make && \
     apt-get autoremove -y -qq && \
@@ -26,7 +26,7 @@
 .PHONY: build push

 PREFIX=gcr.io/google_containers
-TAG = 1.32
+TAG = 1.34

 build:
 	docker build --pull -t $(PREFIX)/fluentd-gcp:$(TAG) .
@@ -70,7 +70,18 @@
 <match reform.**>
   type record_reformer
   enable_ruby true
-  tag kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
+  tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
 </match>

+# Detect exceptions in the log output and forward them as one log entry.
+<match raw.kubernetes.**>
+  type detect_exceptions
+  remove_tag_prefix raw
+  message log
+  stream stream
+  multiline_flush_interval 5
+  max_bytes 500000
+  max_lines 1000
+</match>
+
 # Example:
@@ -17,12 +17,6 @@

 # For systems without journald
 mkdir -p /var/log/journal

-if [ -e /host/lib/libsystemd* ]
-then
-  rm /lib/x86_64-linux-gnu/libsystemd*
-  cp /host/lib/libsystemd* /lib/x86_64-linux-gnu/
-fi
-
 LD_PRELOAD=/opt/td-agent/embedded/lib/libjemalloc.so
 RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR=0.9