Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
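For context, switching to github.com/golang/dep moves dependency pins into Gopkg.toml/Gopkg.lock and commits the resolved sources under vendor/, which is why this change touches so many vendored files. The sketch below is only an illustration of the usual dep workflow, not part of this commit; the constraint shown is a hypothetical example.

    dep init          # creates Gopkg.toml, Gopkg.lock and a populated vendor/ tree
    # pin a dependency by editing Gopkg.toml, e.g. (hypothetical example):
    #   [[constraint]]
    #     name = "k8s.io/kubernetes"
    #     version = "~1.5.0"
    dep ensure        # re-resolves constraints and rewrites vendor/ to match Gopkg.lock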
140  vendor/k8s.io/kubernetes/hack/verify-flags/exceptions.txt  (generated, vendored, normal file)
@@ -0,0 +1,140 @@
Vagrantfile: node_ip = $node_ips[n]
cluster/addons/addon-manager/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
cluster/aws/templates/configure-vm-aws.sh: # We set the hostname_override to the full EC2 private dns name
cluster/aws/templates/configure-vm-aws.sh: api_servers: '${API_SERVERS}'
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "runtime_config"
cluster/aws/templates/configure-vm-aws.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/container-linux/configure-helper.sh: authorization_mode+=",ABAC"
cluster/gce/container-linux/configure-helper.sh: authorization_mode+=",Webhook"
cluster/gce/container-linux/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/container-linux/configure-helper.sh: local authorization_mode="RBAC"
cluster/gce/container-linux/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/container-linux/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/gci/configure-helper.sh: authorization_mode+=",ABAC"
cluster/gce/gci/configure-helper.sh: authorization_mode+=",Webhook"
cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/gci/configure-helper.sh: local authorization_mode="RBAC"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/util.sh: local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
cluster/juju/layers/kubernetes/reactive/k8s.py: check_call(split(cmd.format(kubeconfig, cluster_name, server, ca)))
cluster/juju/layers/kubernetes/reactive/k8s.py: check_call(split(cmd.format(kubeconfig, context, cluster_name, user)))
cluster/juju/layers/kubernetes/reactive/k8s.py: client_key = '/srv/kubernetes/client.key'
cluster/juju/layers/kubernetes/reactive/k8s.py: cluster_name = 'kubernetes'
cluster/juju/layers/kubernetes/reactive/k8s.py: tlslib.client_key(None, client_key, user='ubuntu', group='ubuntu')
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]}
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
cluster/log-dump.sh: local -r node_name="${1}"
cluster/log-dump.sh: for node_name in "${node_names[@]}"; do
cluster/log-dump.sh:readonly report_dir="${1:-_artifacts}"
cluster/photon-controller/templates/salt-master.sh: api_servers: $MASTER_NAME
cluster/photon-controller/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
cluster/photon-controller/util.sh: node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
cluster/photon-controller/util.sh: local cert_dir="/srv/kubernetes"
cluster/photon-controller/util.sh: node_name=${1}
cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }}"
cluster/saltbase/salt/etcd/etcd.manifest:{% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%}
cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd3') -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector + " " + etcd_quorum_read -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:{% set params = log_level + " " + feature_gates + " " + test_args -%}
cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default: {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default: {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default: {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if grains['feature_gates'] is defined -%}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
cluster/saltbase/salt/opencontrail-networking-master/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/opencontrail-networking-minion/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/supervisor/kubelet-checker.sh: {% set kubelet_port = pillar['kubelet_port'] -%}
cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
cluster/ubuntu/util.sh: local node_ip=${1}
cluster/vagrant/provision-utils.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/files/run.sh: cluster_name \
examples/storage/vitess/env.sh: node_ip=$(get_node_ip)
federation/cluster/common.sh: local cert_dir="${kube_temp}/easy-rsa-master/easyrsa3"
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cluster_cidr": "10.180.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.184.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.188.0.0/14",
federation/deploy/config.json.sample: "cluster_name": "cluster1-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster2-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster3-kubernetes",
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
hack/e2e.go:.phase1.cloud_provider="gce"
hack/e2e.go:.phase1.cluster_name="{{.Cluster}}"
hack/e2e.go:.phase1.num_nodes=4
hack/lib/util.sh: local api_port=$5
hack/local-up-cluster.sh: advertise_address="--advertise_address=${API_HOST_IP}"
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: advertise_address=""
hack/local-up-cluster.sh: runtime_config=""
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"google-containers"}
hack/make-rules/test-e2e-node.sh: delete_instances=${DELETE_INSTANCES:-"false"}
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"}
hack/test-update-storage-objects.sh: local storage_backend=${1:-"${STORAGE_BACKEND_ETCD2}"}
hack/test-update-storage-objects.sh: local storage_media_type=${3:-""}
hack/test-update-storage-objects.sh: local storage_versions=${2:-""}
hack/test-update-storage-objects.sh: source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: ContainerPort *int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort" json:"container_port,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: OomScoreAdj *int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj" json:"oom_score_adj,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: PodCidr *string `protobuf:"bytes,1,opt,name=pod_cidr,json=podCidr" json:"pod_cidr,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: RuntimeConfig *RuntimeConfig `protobuf:"bytes,1,opt,name=runtime_config,json=runtimeConfig" json:"runtime_config,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional RuntimeConfig runtime_config = 1;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional int32 container_port = 2;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional int64 oom_score_adj = 5;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional string pod_cidr = 1;
pkg/kubelet/cm/container_manager_linux.go: glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
pkg/kubelet/cm/container_manager_linux.go: glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
pkg/kubelet/network/hairpin/hairpin.go: hairpinModeRelativePath = "hairpin_mode"
pkg/kubelet/qos/policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
pkg/kubelet/qos/policy_test.go: highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
pkg/kubelet/qos/policy_test.go: lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned.
pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/e2e/common/downwardapi_volume.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d", pid)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
test/e2e_node/container_manager_test.go: procfsPath := path.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
test/images/mount-tester/mt.go: flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")
26  vendor/k8s.io/kubernetes/hack/verify-flags/excluded-flags.txt  (generated, vendored, normal file)
@@ -0,0 +1,26 @@
check_leaked_resources
check_node_count
check_version_skew
concurrent_rc_syncs
file_content
file_mode
file_owner
file_perm
fs_type
gke_context
max_in_flight
max_par
new_file_0644
new_file_0660
new_file_0666
new_file_0777
pods_per_node
pods_per_node
test_args
up_to
up_to
upgrade_args
valid_flag
retry_time
file_content_in_loop
break_on_expected_content
640  vendor/k8s.io/kubernetes/hack/verify-flags/known-flags.txt  (generated, vendored, normal file)
@@ -0,0 +1,640 @@
accept-hosts
accept-paths
admission-control
admission-control-config-file
advertise-address
advertised-address
algorithm-provider
all-namespaces
allocate-node-cidrs
allow-missing-template-keys
allow-privileged
allowed-not-ready-nodes
anonymous-auth
api-advertise-addresses
api-burst
api-external-dns-names
api-port
api-prefix
api-rate
api-server-port
api-servers
api-token
api-version
apiserver-count
apiserver-count
audit-log-maxage
audit-log-maxbackup
audit-log-maxsize
audit-log-path
auth-path
auth-path
auth-provider
auth-provider
auth-provider-arg
auth-provider-arg
authentication-kubeconfig
authentication-token-webhook
authentication-token-webhook-cache-ttl
authentication-token-webhook-config-file
authorization-kubeconfig
authorization-mode
authorization-policy-file
authorization-rbac-super-user
authorization-webhook-cache-authorized-ttl
authorization-webhook-cache-unauthorized-ttl
authorization-webhook-config-file
azure-container-registry-config
babysit-daemons
basic-auth-file
bench-pods
bench-quiet
bench-tasks
bench-workers
bind-address
bind-pods-burst
bind-pods-qps
bounding-dirs
build-dependencies
build-only
build-tag
cadvisor-port
cert-dir
certificate-authority
cgroup-driver
cgroup-root
chaos-chance
clean-start
cleanup
cleanup-iptables
client-ca-file
client-certificate
client-key
clientset-api-path
clientset-name
clientset-only
clientset-path
cloud-config
cloud-provider
cluster-cidr
cluster-context
cluster-dns
cluster-domain
cluster-ip
cluster-monitor-period
cluster-name
cluster-signing-cert-file
cluster-signing-key-file
cluster-tag
cni-bin-dir
cni-conf-dir
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-gc-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
concurrent-serviceaccount-token-syncs
concurrent-service-syncs
config-map
config-map-namespace
config-sync-period
configure-cloud-routes
conntrack-max
conntrack-max-per-core
conntrack-min
conntrack-tcp-timeout-close-wait
conntrack-tcp-timeout-established
consumer-port
consumer-service-name
consumer-service-namespace
container-port
container-runtime
container-runtime-endpoint
contain-pod-resources
contention-profiling
controller-start-interval
cors-allowed-origins
cpu-cfs-quota
cpu-percent
create-external-load-balancer
current-release-pr
current-replicas
daemonset-lookup-cache-size
data-dir
default-container-cpu-limit
default-container-mem-limit
delay-shutdown
delete-collection-workers
delete-instances
delete-local-data
delete-namespace
delete-namespace-on-failure
deleting-pods-burst
deleting-pods-qps
deployment-controller-sync-period
deployment-label-key
deserialization-cache-size
dest-file
disable-filter
disable-kubenet
discovery-port
dns-bind-address
dns-port
dns-provider
dns-provider-config
dns-zone-name
docker-email
docker-endpoint
docker-exec-handler
docker-password
docker-server
docker-username
dockercfg-path
driver-port
drop-embedded-fields
dry-run
dump-logs-on-failure
duration-sec
e2e-output-dir
e2e-verify-service-account
enable-controller-attach-detach
enable-cri
enable-custom-metrics
enable-debugging-handlers
enable-dynamic-provisioning
enable-garbage-collector
enable-garbage-collector
enable-garbage-collector
enable-hostpath-provisioner
enable-server
enable-swagger-ui
etcd-address
etcd-cafile
etcd-certfile
etcd-config
etcd-keyfile
etcd-mutation-timeout
etcd-prefix
etcd-pv-capacity
etcd-quorum-read
etcd-server
etcd-servers
etcd-servers-overrides
event-burst
event-qps
event-ttl
eviction-hard
eviction-max-pod-grace-period
eviction-minimum-reclaim
eviction-pressure-transition-period
eviction-soft
eviction-soft-grace-period
executor-bindall
executor-logv
executor-path
executor-suicide-timeout
exit-on-lock-contention
experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig
experimental-cgroups-per-qos
experimental-keystone-url
experimental-keystone-ca-file
experimental-mounter-path
experimental-nvidia-gpus
experimental-prefix
experimental-cri
experimental-check-node-capabilities-before-mount
experimental-kernel-memcg-notification
external-etcd-cafile
external-etcd-certfile
external-etcd-endpoints
external-etcd-keyfile
external-hostname
external-ip
external-name
extra-peer-dirs
experimental-fail-swap-on
failover-timeout
failure-domains
fake-clientset
feature-gates
federated-api-burst
federated-api-qps
federated-kube-context
federation-name
federation-system-namespace
file-check-frequency
file-suffix
file_content_in_loop
flex-volume-plugin-dir
forward-services
framework-name
framework-store-uri
framework-weburi
from-file
from-literal
func-dest
fuzz-iters
garbage-collector-enabled
gather-logs-sizes
gather-metrics-at-teardown
gather-resource-usage
gce-project
gce-service-account
gce-zone
ginkgo-flags
gke-cluster
go-header-file
google-json-key
grace-period
ha-domain
hairpin-mode
hard
hard-pod-affinity-symmetric-weight
healthz-bind-address
healthz-port
heapster-namespace
heapster-port
heapster-scheme
heapster-service
horizontal-pod-autoscaler-sync-period
host-cluster-context
host-ipc-sources
host-network-sources
host-pid-sources
host-port-endpoints
host-system-namespace
hostname-override
http-check-frequency
http-port
ignore-daemonsets
ignore-not-found
image-config-file
image-gc-high-threshold
image-gc-low-threshold
image-project
image-pull-policy
image-pull-progress-deadline
image-service-endpoint
include-extended-apis
include-extended-apis
included-types-overrides
initial-sync-timeout
input-base
input-dirs
insecure-allow-any-token
insecure-bind-address
insecure-experimental-approve-all-kubelet-csrs-for-group
insecure-port
insecure-skip-tls-verify
instance-metadata
instance-name-prefix
internal-clientset-package
iptables-drop-bit
iptables-masquerade-bit
iptables-min-sync-period
iptables-sync-period
ir-data-source
ir-dbname
ir-hawkular
ir-influxdb-host
ir-namespace-only
ir-password
ir-user
jenkins-host
jenkins-jobs
junit-file-number
k8s-bin-dir
k8s-build-output
keep-gogoproto
km-path
kops-admin-access
kops-cluster
kops-kubernetes-version
kops-nodes
kops-ssh-key
kops-state
kops-up-timeout
kops-zones
kube-api-burst
kube-api-content-type
kube-api-qps
kube-master
kube-master
kube-master-url
kube-reserved
kubeadm-path
kubecfg-file
kubectl-path
kubelet-address
kubelet-api-servers
kubelet-cadvisor-port
kubelet-certificate-authority
kubelet-cgroups
kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-enable-debugging-handlers
kubelet-flags
kubelet-host-network-sources
kubelet-https
kubelet-kubeconfig
kubelet-network-plugin
kubelet-pod-infra-container-image
kubelet-port
kubelet-preferred-address-types
kubelet-read-only-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kubernetes-anywhere-cluster
kubernetes-anywhere-path
kubernetes-anywhere-phase2-provider
kubernetes-anywhere-up-timeout
kubernetes-service-node-port
label-columns
large-cluster-size-threshold
last-release-pr
leader-elect
leader-elect-lease-duration
leader-elect-renew-deadline
leader-elect-retry-period
lease-duration
leave-stdin-open
limit-bytes
listers-package
load-balancer-ip
lock-file
log-flush-frequency
long-running-request-regexp
low-diskspace-threshold-mb
make-iptables-util-chains
make-symlinks
manifest-url
manifest-url-header
masquerade-all
master-os-distro
master-service-namespace
max-concurrency
max-connection-bytes-per-sec
max-log-age
max-log-backups
max-log-size
max-mutating-requests-inflight
max-open-files
max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
maximum-dead-containers
maximum-dead-containers-per-container
mesos-authentication-principal
mesos-authentication-provider
mesos-authentication-secret-file
mesos-cgroup-prefix
mesos-default-pod-roles
mesos-executor-cpus
mesos-executor-mem
mesos-framework-roles
mesos-generate-task-discovery
mesos-launch-grace-period
mesos-master
mesos-sandbox-overlay
mesos-user
min-available
min-pr-number
min-request-timeout
min-resync-period
minimum-container-ttl-duration
minimum-image-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
namespace-sync-period
network-plugin
network-plugin-dir
network-plugin-mtu
no-headers
no-headers
no-suggestions
no-suggestions
node-cidr-mask-size
node-eviction-rate
node-instance-group
node-ip
node-labels
node-max-log-age
node-max-log-backups
node-max-log-size
node-monitor-grace-period
node-monitor-period
node-name
node-os-distro
node-path-override
node-port
node-schedulable-timeout
node-startup-grace-period
node-status-update-frequency
node-sync-period
non-masquerade-cidr
num-nodes
oidc-ca-file
oidc-client-id
oidc-groups-claim
oidc-issuer-url
oidc-username-claim
only-idl
oom-score-adj
out-version
outofdisk-transition-frequency
output-base
output-directory
output-file-base
output-package
output-print-type
output-version
path-override
pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-manifest-path
pod-network-cidr
pod-running
pods-per-core
policy-config-file
poll-interval
portal-net
prepull-images
private-mountns
prom-push-gateway
protect-kernel-defaults
proto-import
proxy-bindall
proxy-client-cert-file
proxy-client-key-file
proxy-kubeconfig
proxy-logv
proxy-mode
proxy-port-range
public-address-override
pv-recycler-increment-timeout-nfs
pv-recycler-maximum-retry
pv-recycler-minimum-timeout-hostpath
pv-recycler-minimum-timeout-nfs
pv-recycler-pod-template-filepath-hostpath
pv-recycler-pod-template-filepath-nfs
pv-recycler-timeout-increment-hostpath
pvclaimbinder-sync-period
read-only-port
really-crash-for-testing
reconcile-cidr
reconcile-cooldown
reconcile-interval
register-node
register-retry-count
register-schedulable
register-with-taints
registry-burst
registry-qps
reject-methods
reject-paths
remove-node
repair-malformed-updates
replicaset-lookup-cache-size
replication-controller-lookup-cache-size
repo-root
report-dir
report-prefix
requestheader-allowed-names
requestheader-client-ca-file
requestheader-extra-headers-prefix
requestheader-group-headers
requestheader-username-headers
require-kubeconfig
required-contexts
resolv-conf
resource-container
resource-quota-sync-period
resource-version
results-dir
retry_time
rkt-api-endpoint
rkt-path
rkt-stage1-image
root-ca-file
root-dir
route-reconciliation-period
run-kubelet-mode
run-proxy
run-services-mode
runtime-cgroups
runtime-config
runtime-request-timeout
save-config
schedule-pods-here
scheduler-config
scheduler-name
schema-cache-dir
scopes
seccomp-profile-root
secondary-node-eviction-rate
secret-name
secure-port
serialize-image-pulls
server-start-timeout
service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cidr
service-cluster-ip-range
service-dns-domain
service-dns-suffix
service-generator
service-node-port-range
service-node-ports
service-overrides
service-sync-period
session-affinity
show-all
show-events
show-kind
show-labels
shutdown-fd
shutdown-fifo
since-seconds
since-time
skip-generated-rewrite
skip-munges
skip-preflight-checks
skip-unsafe
sort-by
source-file
ssh-env
ssh-keyfile
ssh-options
ssh-user
start-services
static-pods-config
stats-port
stop-services
storage-backend
storage-media-type
storage-version
storage-versions
streaming-connection-idle-timeout
suicide-timeout
sync-frequency
system-cgroups
system-container
system-pods-startup-timeout
system-reserved
system-validate-mode
target-port
target-ram-mb
tcp-services
terminated-pod-gc-threshold
test-flags
test-timeout
tls-ca-file
tls-cert-file
tls-private-key-file
to-version
tls-sni-cert-key
token-auth-file
ttl-keys-prefix
ttl-secs
type-src
udp-port
udp-timeout
unhealthy-zone-threshold
unix-socket
update-period
upgrade-image
upgrade-target
use-service-account-credentials
use-kubernetes-cluster-service
use-kubernetes-version
user-whitelist
verify-only
versioned-clientset-package
viper-config
volume-dir
volume-plugin-dir
volume-stats-agg-period
watch-cache
watch-cache-sizes
watch-only
whitelist-override-label
windows-line-endings
www-prefix
zone-id
zone-name
garbage-collector-enabled
viper-config
log-lines-total
run-duration
attach-detach-reconcile-sync-period
disable-attach-detach-reconcile-sync