Merge pull request #1392 from umohnani8/pid-ns

Enable per pod PID namespace setting
Mrunal Patel 2018-03-02 13:33:14 -08:00 committed by GitHub
commit 61a49a111d
121 changed files with 11298 additions and 5324 deletions
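
This change replaces CRI-O's daemon-wide enable_shared_pid_namespace option with the per-pod PID namespace mode carried in the CRI sandbox config. Below is a minimal Go sketch of how a CRI client would now request a pod-shared PID namespace; the vendor import path and the metadata values are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	// Assumed vendor path for the CRI v1alpha2 API referenced elsewhere in this PR.
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

// buildSandboxConfig sketches a pod sandbox config that asks for a pod-level
// (shared) PID namespace via NamespaceMode_POD instead of a daemon-wide flag.
func buildSandboxConfig() *pb.PodSandboxConfig {
	return &pb.PodSandboxConfig{
		Metadata: &pb.PodSandboxMetadata{Name: "example", Namespace: "default", Uid: "uid-1"},
		Linux: &pb.LinuxPodSandboxConfig{
			SecurityContext: &pb.LinuxSandboxSecurityContext{
				NamespaceOptions: &pb.NamespaceOption{
					// POD: one PID namespace shared by every container in the pod.
					// CONTAINER (default): a private PID namespace per container.
					// NODE: the host's PID namespace.
					Pid: pb.NamespaceMode_POD,
				},
			},
		},
	}
}

func main() {
	cfg := buildSandboxConfig()
	fmt.Println(cfg.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid())
}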

View File

@ -114,9 +114,6 @@ default_mounts = [
# pids_limit is the number of processes allowed in a container
pids_limit = {{ .PidsLimit }}
# enable using a shared PID namespace for containers in a pod
enable_shared_pid_namespace = {{ .EnableSharedPIDNamespace }}
# log_size_max is the max limit for the container log size in bytes.
# Negative values indicate that no limit is imposed.
log_size_max = {{ .LogSizeMax }}

View File

@ -131,9 +131,6 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
if ctx.GlobalIsSet("pids-limit") {
config.PidsLimit = ctx.GlobalInt64("pids-limit")
}
if ctx.GlobalIsSet("enable-shared-pid-namespace") {
config.EnableSharedPIDNamespace = ctx.GlobalBool("enable-shared-pid-namespace")
}
if ctx.GlobalIsSet("log-size-max") {
config.LogSizeMax = ctx.GlobalInt64("log-size-max")
}
@ -299,10 +296,6 @@ func main() {
Value: lib.DefaultPidsLimit,
Usage: "maximum number of processes allowed in a container",
},
cli.BoolFlag{
Name: "enable-shared-pid-namespace",
Usage: "enable using a shared PID namespace for containers in a pod",
},
cli.Int64Flag{
Name: "log-size-max",
Value: lib.DefaultLogSizeMax,

View File

@ -118,7 +118,7 @@
include: "build/kubernetes.yml"
vars:
force_clone: True
k8s_git_version: "master"
k8s_git_version: "release-1.10"
k8s_github_fork: "kubernetes"
crio_socket: "/var/run/crio/crio.sock"
- name: run k8s e2e tests

View File

@ -94,8 +94,6 @@ crio [GLOBAL OPTIONS] config [OPTIONS]
**--pids-limit**="": Maximum number of processes allowed in a container (default: 1024)
**--enable-shared-pid-namespace**="": Enable using a shared PID namespace for containers in a pod (default: false)
**--root**="": The crio root dir (default: "/var/lib/containers/storage")
**--registry**="": Registry host which will be prepended to unqualified images, can be specified multiple times

View File

@ -87,9 +87,6 @@ Example:
**pids_limit**=""
Maximum number of processes allowed in a container (default: 1024)
**enable_shared_pid_namespace**=""
Enable using a shared PID namespace for containers in a pod (default: false)
**runtime**=""
OCI runtime path (default: "/usr/bin/runc")

View File

@ -121,9 +121,6 @@ type RuntimeConfig struct {
// NoPivot instructs the runtime to not use `pivot_root`, but instead use `MS_MOVE`
NoPivot bool `toml:"no_pivot"`
// EnableSharePidNamespace instructs the runtime to enable share pid namespace
EnableSharedPIDNamespace bool `toml:"enable_shared_pid_namespace"`
// Conmon is the path to conmon binary, used for managing the runtime.
Conmon string `toml:"conmon"`

View File

@ -1000,7 +1000,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE {
// kubernetes PodSpec specify to use Host PID namespace
specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace))
} else if s.config.EnableSharedPIDNamespace {
} else if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_POD {
// share Pod PID namespace
pidNsPath := fmt.Sprintf("/proc/%d/ns/pid", podInfraState.Pid)
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), pidNsPath); err != nil {
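
For context, the mode handling above reduces to a three-way decision. The sketch below restates it outside of createSandboxContainer with an illustrative helper name, assuming the same runtime-tools spec generator and CRI types used in the hunk.

package sketch

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" // assumed vendor path
)

// configurePIDNamespace is an illustrative helper, not CRI-O's actual function.
func configurePIDNamespace(specgen *generate.Generator, mode pb.NamespaceMode, infraPid int) error {
	switch mode {
	case pb.NamespaceMode_NODE:
		// Host PID: drop the PID namespace entry so the container uses the host's.
		return specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace))
	case pb.NamespaceMode_POD:
		// Pod-shared PID: join the infra (pause) container's PID namespace.
		path := fmt.Sprintf("/proc/%d/ns/pid", infraPid)
		return specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), path)
	default:
		// CONTAINER: keep the private per-container PID namespace from the base spec.
		return nil
	}
}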

View File

@ -56,8 +56,6 @@ IMAGE_VOLUMES=${IMAGE_VOLUMES:-mkdir}
PIDS_LIMIT=${PIDS_LIMIT:-1024}
# Log size max limit
LOG_SIZE_MAX_LIMIT=${LOG_SIZE_MAX_LIMIT:--1}
# enable share container pid namespace
ENABLE_SHARED_PID_NAMESPACE=${ENABLE_SHARED_PID_NAMESPACE:-false}
TESTDIR=$(mktemp -d)
@ -217,7 +215,7 @@ function start_crio() {
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/image-volume-test:latest --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json
"$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --enable-shared-pid-namespace=${ENABLE_SHARED_PID_NAMESPACE} --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
"$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
# Prepare the CNI configuration files, we're running with non host networking by default
if [[ -n "$4" ]]; then

View File

@ -6,14 +6,15 @@ function teardown() {
cleanup_test
}
function pid_namespace_test() {
@test "pid_namespace_mode_pod_test" {
start_crio
run crictl runp "$TESTDATA"/sandbox_config.json
pidNamespaceMode=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["namespace_options"]["pid"] = 0; json.dump(obj, sys.stdout)')
echo "$pidNamespaceMode" > "$TESTDIR"/sandbox_pidnamespacemode_config.json
run crictl runp "$TESTDIR"/sandbox_pidnamespacemode_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json
run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/sandbox_pidnamespacemode_config.json
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
@ -23,7 +24,7 @@ function pid_namespace_test() {
run crictl exec --sync "$ctr_id" cat /proc/1/cmdline
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "${EXPECTED_INIT:-redis}" ]]
[[ "$output" =~ pause ]]
run crictl stopp "$pod_id"
echo "$output"
@ -35,11 +36,3 @@ function pid_namespace_test() {
cleanup_pods
stop_crio
}
@test "pod disable shared pid namespace" {
ENABLE_SHARED_PID_NAMESPACE=false pid_namespace_test
}
@test "pod enable shared pid namespace" {
ENABLE_SHARED_PID_NAMESPACE=true EXPECTED_INIT=pause pid_namespace_test
}
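
The python one-liner in the test above flips namespace_options.pid to 0, which is NamespaceMode_POD in the CRI enum. A rough Go equivalent is sketched below; the file paths are illustrative and the nested keys are assumed to exist in the test's sandbox_config.json.

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
)

func main() {
	data, err := ioutil.ReadFile("sandbox_config.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	var cfg map[string]interface{}
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	linux := cfg["linux"].(map[string]interface{})
	sc := linux["security_context"].(map[string]interface{})
	nsOpts := sc["namespace_options"].(map[string]interface{})
	nsOpts["pid"] = 0 // 0 == NamespaceMode_POD: containers in the pod share one PID namespace
	out, err := json.Marshal(cfg)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("sandbox_pidnamespacemode_config.json", out, 0644); err != nil {
		log.Fatal(err)
	}
}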

View File

@ -1,11 +1,11 @@
k8s.io/kubernetes 7488d1c9210e60aef9ad49f07cb5d8a24152db88 https://github.com/kubernetes/kubernetes
k8s.io/kubernetes 305052d6d2c1fa976c7a841350396061a2c26ac0 https://github.com/kubernetes/kubernetes
k8s.io/client-go 7cd1d3291b7d9b1e2d54d4b69eb65995eaf8888e https://github.com/kubernetes/client-go
k8s.io/apimachinery 616b23029fa3dc3e0ccefd47963f5651a6543d94 https://github.com/kubernetes/apimachinery
k8s.io/apiserver 4d1163080139f1f9094baf8a3a6099e85e1867f6 https://github.com/kubernetes/apiserver
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
k8s.io/api 2694da5be9c4ab4f3fd826112d4c3f71b8bf4b23 https://github.com/kubernetes/api
k8s.io/api 5ce4aa0bf2f097f6021127b3d879eeda82026be8 https://github.com/kubernetes/api
k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/kubernetes/kube-openapi
k8s.io/apiextensions-apiserver 7a8c4a8494109c5ce12cb35d9699b81a240583c0 https://github.com/kubernetes/apiextensions-apiserver
k8s.io/apiextensions-apiserver 1b31e26d82f1ec2e945c560790e98f34bb5f2e63 https://github.com/kubernetes/apiextensions-apiserver
#
github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba
github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -76,9 +76,8 @@ func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration {
func (in *InitializerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -110,9 +109,8 @@ func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList
func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -54,9 +54,8 @@ func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration
func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -88,9 +87,8 @@ func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigura
func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -200,9 +198,8 @@ func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfigura
func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -234,9 +231,8 @@ func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfi
func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -50,9 +50,8 @@ func (in *ControllerRevision) DeepCopy() *ControllerRevision {
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -84,9 +83,8 @@ func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -113,9 +111,8 @@ func (in *DaemonSet) DeepCopy() *DaemonSet {
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -164,9 +161,8 @@ func (in *DaemonSetList) DeepCopy() *DaemonSetList {
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -286,9 +282,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -338,9 +333,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -478,9 +472,8 @@ func (in *ReplicaSet) DeepCopy() *ReplicaSet {
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -529,9 +522,8 @@ func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -700,9 +692,8 @@ func (in *StatefulSet) DeepCopy() *StatefulSet {
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -751,9 +742,8 @@ func (in *StatefulSetList) DeepCopy() *StatefulSetList {
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -50,9 +50,8 @@ func (in *ControllerRevision) DeepCopy() *ControllerRevision {
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -84,9 +83,8 @@ func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -113,9 +111,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -165,9 +162,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -199,9 +195,8 @@ func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -423,9 +418,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -491,9 +485,8 @@ func (in *StatefulSet) DeepCopy() *StatefulSet {
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -542,9 +535,8 @@ func (in *StatefulSetList) DeepCopy() *StatefulSetList {
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta2
@ -50,9 +50,8 @@ func (in *ControllerRevision) DeepCopy() *ControllerRevision {
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -84,9 +83,8 @@ func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -113,9 +111,8 @@ func (in *DaemonSet) DeepCopy() *DaemonSet {
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -164,9 +161,8 @@ func (in *DaemonSetList) DeepCopy() *DaemonSetList {
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -286,9 +282,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -338,9 +333,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -478,9 +472,8 @@ func (in *ReplicaSet) DeepCopy() *ReplicaSet {
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -529,9 +522,8 @@ func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -700,9 +692,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -768,9 +759,8 @@ func (in *StatefulSet) DeepCopy() *StatefulSet {
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -819,9 +809,8 @@ func (in *StatefulSetList) DeepCopy() *StatefulSetList {
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -40,6 +40,26 @@ func (in *BoundObjectReference) DeepCopy() *BoundObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
*out = *in
@ -64,9 +84,8 @@ func (in *TokenRequest) DeepCopy() *TokenRequest {
func (in *TokenRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -149,9 +168,8 @@ func (in *TokenReview) DeepCopy() *TokenReview {
func (in *TokenReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -199,8 +217,12 @@ func (in *UserInfo) DeepCopyInto(out *UserInfo) {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
(*out)[key] = make(ExtraValue, len(val))
copy((*out)[key], val)
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -24,6 +24,26 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReview) DeepCopyInto(out *TokenReview) {
*out = *in
@ -48,9 +68,8 @@ func (in *TokenReview) DeepCopy() *TokenReview {
func (in *TokenReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -98,8 +117,12 @@ func (in *UserInfo) DeepCopyInto(out *UserInfo) {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
(*out)[key] = make(ExtraValue, len(val))
copy((*out)[key], val)
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -24,6 +24,26 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
*out = *in
@ -48,9 +68,8 @@ func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -171,9 +190,8 @@ func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview {
func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -234,9 +252,8 @@ func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -279,9 +296,8 @@ func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -314,8 +330,12 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
(*out)[key] = make(ExtraValue, len(val))
copy((*out)[key], val)
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -24,6 +24,26 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
*out = *in
@ -48,9 +68,8 @@ func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -171,9 +190,8 @@ func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview {
func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -234,9 +252,8 @@ func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -279,9 +296,8 @@ func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -314,8 +330,12 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
(*out)[key] = make(ExtraValue, len(val))
copy((*out)[key], val)
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return

File diff suppressed because it is too large

View File

@ -44,6 +44,49 @@ message CrossVersionObjectReference {
optional string apiVersion = 3;
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
message ExternalMetricSource {
// metricName is the name of the metric in question.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// targetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
// targetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
message ExternalMetricStatus {
// metricName is the name of a metric used for autoscaling in
// metric system.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// currentValue is the current value of the metric (as a quantity)
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
// currentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
}
// configuration of a horizontal pod autoscaler.
message HorizontalPodAutoscaler {
// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
@ -160,6 +203,14 @@ message MetricSpec {
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricSource resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricSource external = 5;
}
// MetricStatus describes the last-read state of a single metric.
@ -186,6 +237,14 @@ message MetricStatus {
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricStatus resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricStatus external = 5;
}
// ObjectMetricSource indicates how to scale on a metric describing a

View File

@ -161,6 +161,12 @@ var (
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
// ExternalMetricSourceType is a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
ExternalMetricSourceType MetricSourceType = "External"
)
// MetricSpec specifies how to scale based on a single metric
@ -186,6 +192,13 @@ type MetricSpec struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
}
// ObjectMetricSource indicates how to scale on a metric describing a
@ -234,6 +247,26 @@ type ResourceMetricSource struct {
TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"`
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSource struct {
// metricName is the name of the metric in question.
MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
// targetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"`
// targetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"`
}
// MetricStatus describes the last-read state of a single metric.
type MetricStatus struct {
// type is the type of metric source. It will be one of "Object",
@ -256,6 +289,13 @@ type MetricStatus struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
}
// HorizontalPodAutoscalerConditionType are the valid conditions of
@ -337,3 +377,20 @@ type ResourceMetricStatus struct {
// It will always be set, regardless of the corresponding metric specification.
CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"`
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
// metricName is the name of a metric used for autoscaling in
// metric system.
MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
// currentValue is the current value of the metric (as a quantity)
CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
// currentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"`
}

View File

@ -38,6 +38,30 @@ func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
return map_CrossVersionObjectReference
}
var map_ExternalMetricSource = map[string]string{
"": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
"metricName": "metricName is the name of the metric in question.",
"metricSelector": "metricSelector is used to identify a specific time series within a given metric.",
"targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
"targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
}
func (ExternalMetricSource) SwaggerDoc() map[string]string {
return map_ExternalMetricSource
}
var map_ExternalMetricStatus = map[string]string{
"": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
"metricName": "metricName is the name of a metric used for autoscaling in metric system.",
"metricSelector": "metricSelector is used to identify a specific time series within a given metric.",
"currentValue": "currentValue is the current value of the metric (as a quantity)",
"currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.",
}
func (ExternalMetricStatus) SwaggerDoc() map[string]string {
return map_ExternalMetricStatus
}
var map_HorizontalPodAutoscaler = map[string]string{
"": "configuration of a horizontal pod autoscaler.",
"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
@ -103,6 +127,7 @@ var map_MetricSpec = map[string]string{
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
func (MetricSpec) SwaggerDoc() map[string]string {
@ -115,6 +140,7 @@ var map_MetricStatus = map[string]string{
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
func (MetricStatus) SwaggerDoc() map[string]string {

View File

@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
resource "k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -42,6 +41,84 @@ func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.TargetValue != nil {
in, out := &in.TargetValue, &out.TargetValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
if in.TargetAverageValue != nil {
in, out := &in.TargetAverageValue, &out.TargetAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
out.CurrentValue = in.CurrentValue.DeepCopy()
if in.CurrentAverageValue != nil {
in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
@ -66,9 +143,8 @@ func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -117,9 +193,8 @@ func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -174,8 +249,7 @@ func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscal
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.CurrentCPUUtilizationPercentage != nil {
@ -230,6 +304,15 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -273,6 +356,15 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -373,8 +465,8 @@ func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -440,9 +532,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

File diff suppressed because it is too large

View File

@ -44,6 +44,50 @@ message CrossVersionObjectReference {
optional string apiVersion = 3;
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
// Exactly one "target" type should be set.
message ExternalMetricSource {
// metricName is the name of the metric in question.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// targetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
// targetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
message ExternalMetricStatus {
// metricName is the name of a metric used for autoscaling in
// metric system.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// currentValue is the current value of the metric (as a quantity)
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
// currentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
}
// HorizontalPodAutoscaler is the configuration for a horizontal pod
// autoscaler, which automatically manages the replica count of any resource
// implementing the scale subresource based on the metrics specified.
@ -175,6 +219,14 @@ message MetricSpec {
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricSource resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricSource external = 5;
}
// MetricStatus describes the last-read state of a single metric.
@ -201,6 +253,14 @@ message MetricStatus {
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricStatus resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricStatus external = 5;
}
// ObjectMetricSource indicates how to scale on a metric describing a

View File

@ -73,6 +73,12 @@ var (
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
// ExternalMetricSourceType is a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
ExternalMetricSourceType MetricSourceType = "External"
)
// MetricSpec specifies how to scale based on a single metric
@ -98,6 +104,13 @@ type MetricSpec struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
}
// ObjectMetricSource indicates how to scale on a metric describing a
@ -146,6 +159,27 @@ type ResourceMetricSource struct {
TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"`
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
// Exactly one "target" type should be set.
type ExternalMetricSource struct {
// metricName is the name of the metric in question.
MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
// targetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"`
// targetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"`
}
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatus struct {
// observedGeneration is the most recent generation observed by this autoscaler.
@ -231,6 +265,13 @@ type MetricStatus struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
}
// ObjectMetricStatus indicates the current value of a metric describing a
@ -277,6 +318,23 @@ type ResourceMetricStatus struct {
CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"`
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
// metricName is the name of a metric used for autoscaling in
// metric system.
MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
// currentValue is the current value of the metric (as a quantity)
CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
// currentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
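
The vendored autoscaling/v2beta1 types above gain External metric support as part of the Kubernetes bump. A hedged sketch of how a client would populate the new field follows; the import paths assume the k8s.io/api packages pulled in by this vendor update, and the metric name and selector are made up.

package main

import (
	"fmt"

	autoscaling "k8s.io/api/autoscaling/v2beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	target := resource.MustParse("100")
	spec := autoscaling.MetricSpec{
		Type: autoscaling.ExternalMetricSourceType,
		External: &autoscaling.ExternalMetricSource{
			MetricName:     "queue_messages_ready", // made-up metric name
			MetricSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"queue": "tasks"}},
			TargetValue:    &target, // mutually exclusive with TargetAverageValue
		},
	}
	fmt.Printf("%+v\n", spec)
}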

View File

@ -38,6 +38,30 @@ func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
return map_CrossVersionObjectReference
}
var map_ExternalMetricSource = map[string]string{
"": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \"target\" type should be set.",
"metricName": "metricName is the name of the metric in question.",
"metricSelector": "metricSelector is used to identify a specific time series within a given metric.",
"targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
"targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
}
func (ExternalMetricSource) SwaggerDoc() map[string]string {
return map_ExternalMetricSource
}
var map_ExternalMetricStatus = map[string]string{
"": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
"metricName": "metricName is the name of a metric used for autoscaling in metric system.",
"metricSelector": "metricSelector is used to identify a specific time series within a given metric.",
"currentValue": "currentValue is the current value of the metric (as a quantity)",
"currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.",
}
func (ExternalMetricStatus) SwaggerDoc() map[string]string {
return map_ExternalMetricStatus
}
var map_HorizontalPodAutoscaler = map[string]string{
"": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.",
"metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
@ -104,6 +128,7 @@ var map_MetricSpec = map[string]string{
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
func (MetricSpec) SwaggerDoc() map[string]string {
@ -116,6 +141,7 @@ var map_MetricStatus = map[string]string{
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
func (MetricStatus) SwaggerDoc() map[string]string {

View File

@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2beta1
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -42,6 +41,84 @@ func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.TargetValue != nil {
in, out := &in.TargetValue, &out.TargetValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
if in.TargetAverageValue != nil {
in, out := &in.TargetAverageValue, &out.TargetAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
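The generated copy helpers recreate every *resource.Quantity pointer, so a deep copy can be mutated without touching the shared original. A small usage sketch (the metric name and target value are made up):

package main

import (
	"fmt"

	"k8s.io/api/autoscaling/v2beta1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	target := resource.MustParse("100")
	src := &v2beta1.ExternalMetricSource{
		MetricName:  "requests_per_second", // hypothetical metric name
		TargetValue: &target,
	}

	// DeepCopy returns an independent object: the *resource.Quantity pointers
	// are re-created, so changing the copy leaves the original untouched.
	cp := src.DeepCopy()
	cp.TargetValue.Set(200)

	fmt.Println(src.TargetValue.String(), cp.TargetValue.String()) // 100 200
}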
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
out.CurrentValue = in.CurrentValue.DeepCopy()
if in.CurrentAverageValue != nil {
in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
@ -66,9 +143,8 @@ func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -117,9 +193,8 @@ func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -172,8 +247,7 @@ func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscal
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.CurrentMetrics != nil {
@ -233,6 +307,15 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -276,6 +359,15 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -376,8 +468,8 @@ func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -49,9 +49,8 @@ func (in *Job) DeepCopy() *Job {
func (in *Job) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -101,9 +100,8 @@ func (in *JobList) DeepCopy() *JobList {
func (in *JobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -192,8 +190,7 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.CompletionTime != nil {
@ -201,8 +198,7 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return

View File

@ -16,13 +16,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -50,9 +49,8 @@ func (in *CronJob) DeepCopy() *CronJob {
func (in *CronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -84,9 +82,8 @@ func (in *CronJobList) DeepCopy() *CronJobList {
func (in *CronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -155,8 +152,7 @@ func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return
@ -195,9 +191,8 @@ func (in *JobTemplate) DeepCopy() *JobTemplate {
func (in *JobTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,13 +16,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2alpha1
import (
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -50,9 +49,8 @@ func (in *CronJob) DeepCopy() *CronJob {
func (in *CronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -84,9 +82,8 @@ func (in *CronJobList) DeepCopy() *CronJobList {
func (in *CronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -155,8 +152,7 @@ func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return
@ -195,9 +191,8 @@ func (in *JobTemplate) DeepCopy() *JobTemplate {
func (in *JobTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -48,9 +48,8 @@ func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest {
func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -99,9 +98,8 @@ func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestLi
func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -126,8 +124,12 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
(*out)[key] = make(ExtraValue, len(val))
copy((*out)[key], val)
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return
@ -170,3 +172,23 @@ func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequest
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
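A short illustration of what the nil-preserving map copy above changes in practice: a CertificateSigningRequestSpec whose Extra map holds a nil ExtraValue now round-trips through DeepCopy unchanged, instead of the nil entry being replaced with an empty slice. The username and keys below are hypothetical:

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
)

func main() {
	spec := certsv1beta1.CertificateSigningRequestSpec{
		Username: "jane", // hypothetical requestor
		Extra: map[string]certsv1beta1.ExtraValue{
			"scopes": {"openid", "profile"},
			"empty":  nil, // nil values are now preserved by DeepCopyInto
		},
	}

	cp := spec.DeepCopy()
	fmt.Println(cp.Extra["scopes"], cp.Extra["empty"] == nil) // [openid profile] true
}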

File diff suppressed because it is too large.

View File

@ -170,7 +170,7 @@ message Binding {
optional ObjectReference target = 2;
}
// Represents storage that is managed by an external CSI volume driver
// Represents storage that is managed by an external CSI volume driver (Beta feature)
message CSIPersistentVolumeSource {
// Driver is the name of the driver to use for this volume.
// Required.
@ -191,6 +191,34 @@ message CSIPersistentVolumeSource {
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
optional string fsType = 4;
// Attributes of the volume to publish.
// +optional
map<string, string> volumeAttributes = 5;
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference controllerPublishSecretRef = 6;
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeUnstageVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference nodeStageSecretRef = 7;
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference nodePublishSecretRef = 8;
}
// Adds and removes POSIX capabilities from running containers.
@ -976,6 +1004,7 @@ message Endpoints {
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
// +optional
repeated EndpointSubset subsets = 2;
}
@ -2528,7 +2557,7 @@ message PersistentVolumeSource {
// +optional
optional StorageOSPersistentVolumeSource storageos = 21;
// CSI represents storage that handled by an external CSI driver
// CSI represents storage that is handled by an external CSI driver (Beta feature).
// +optional
optional CSIPersistentVolumeSource csi = 22;
}
@ -2556,8 +2585,9 @@ message PersistentVolumeSpec {
optional ObjectReference claimRef = 4;
// What happens to a persistent volume when released from its claim.
// Valid options are Retain (default) and Recycle.
// Recycling must be supported by the volume plugin underlying this persistent volume.
// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
// Recycle must be supported by the volume plugin underlying this PersistentVolume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
// +optional
optional string persistentVolumeReclaimPolicy = 5;
@ -2578,6 +2608,11 @@ message PersistentVolumeSpec {
// This is an alpha feature and may change in the future.
// +optional
optional string volumeMode = 8;
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
// +optional
optional VolumeNodeAffinity nodeAffinity = 9;
}
// PersistentVolumeStatus is the current status of a persistent volume.
@ -3028,7 +3063,6 @@ message PodSpec {
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
// +optional
optional string dnsPolicy = 6;
@ -3078,6 +3112,16 @@ message PodSpec {
// +optional
optional bool hostIPC = 13;
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
optional bool shareProcessNamespace = 27;
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
@ -3142,7 +3186,6 @@ message PodSpec {
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
// +optional
optional PodDNSConfig dnsConfig = 26;
}
@ -4457,6 +4500,12 @@ message VolumeMount {
optional string mountPropagation = 5;
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
message VolumeNodeAffinity {
// Required specifies hard node constraints that must be met.
optional NodeSelector required = 1;
}
// Projection that may be projected along with other supported volume types
message VolumeProjection {
// information about the secret data to project

vendor/k8s.io/api/core/v1/types.go (73 changed lines; generated, vendored)
View File

@ -446,7 +446,7 @@ type PersistentVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
// CSI represents storage that handled by an external CSI driver
// CSI represents storage that is handled by an external CSI driver (Beta feature).
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
@ -511,8 +511,9 @@ type PersistentVolumeSpec struct {
// +optional
ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
// What happens to a persistent volume when released from its claim.
// Valid options are Retain (default) and Recycle.
// Recycling must be supported by the volume plugin underlying this persistent volume.
// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
// Recycle must be supported by the volume plugin underlying this PersistentVolume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
// +optional
PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
@ -530,6 +531,16 @@ type PersistentVolumeSpec struct {
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
// +optional
NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
// Required specifies hard node constraints that must be met.
Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
}
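For context, a hedged sketch of how the new nodeAffinity field is meant to be used, pinning a local PersistentVolume to the node that owns the disk. The path, capacity, and node name are placeholders:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	pv := v1.PersistentVolumeSpec{
		Capacity: v1.ResourceList{
			v1.ResourceStorage: resource.MustParse("10Gi"),
		},
		PersistentVolumeSource: v1.PersistentVolumeSource{
			Local: &v1.LocalVolumeSource{Path: "/mnt/disks/ssd1"},
		},
		// NodeAffinity pins the volume to the node that actually has the disk,
		// so pods using it are only scheduled onto that node.
		NodeAffinity: &v1.VolumeNodeAffinity{
			Required: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"node-1"},
					}},
				}},
			},
		},
	}
	fmt.Println(pv.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values)
}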
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
@ -1010,8 +1021,8 @@ type FlockerVolumeSource struct {
type StorageMedium string
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this
StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux)
StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
)
@ -1717,7 +1728,7 @@ type LocalVolumeSource struct {
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// Represents storage that is managed by an external CSI volume driver
// Represents storage that is managed by an external CSI volume driver (Beta feature)
type CSIPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
// Required.
@ -1738,6 +1749,34 @@ type CSIPersistentVolumeSource struct {
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// Attributes of the volume to publish.
// +optional
VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeUnstageVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
}
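A minimal sketch of the expanded CSIPersistentVolumeSource, assuming a hypothetical CSI driver and secret names; it only shows how the new attribute map and per-call secret references fit together:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	csi := v1.CSIPersistentVolumeSource{
		Driver:       "csi.example.com", // hypothetical CSI driver name
		VolumeHandle: "vol-0123456789",
		FSType:       "ext4",
		VolumeAttributes: map[string]string{
			"tier": "fast", // opaque, driver-specific attributes
		},
		// Each call type can use its own secret; all are optional, and the
		// whole secret's contents are passed to the driver when set.
		ControllerPublishSecretRef: &v1.SecretReference{Name: "csi-controller-secret", Namespace: "kube-system"},
		NodeStageSecretRef:         &v1.SecretReference{Name: "csi-node-secret", Namespace: "kube-system"},
		NodePublishSecretRef:       &v1.SecretReference{Name: "csi-publish-secret", Namespace: "kube-system"},
	}
	fmt.Println(csi.Driver, len(csi.VolumeAttributes))
}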
// ContainerPort represents a network port in a single container.
@ -2808,7 +2847,6 @@ type PodSpec struct {
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
// +optional
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
@ -2851,6 +2889,15 @@ type PodSpec struct {
// +k8s:conversion-gen=false
// +optional
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
@ -2905,7 +2952,6 @@ type PodSpec struct {
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
}
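For orientation, a minimal sketch of a pod opting into the new shared process namespace. The pod name and images are placeholders, and the field is alpha, honored only when the PodShareProcessNamespace feature gate is enabled:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	share := true
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-demo"},
		Spec: v1.PodSpec{
			// With a shared process namespace, containers can see and signal
			// each other's processes, and no container's entrypoint is PID 1.
			ShareProcessNamespace: &share,
			Containers: []v1.Container{
				{Name: "app", Image: "example.com/app:latest"},     // hypothetical image
				{Name: "debug", Image: "example.com/debug:latest"}, // hypothetical image
			},
		},
	}
	fmt.Println(*pod.Spec.ShareProcessNamespace)
}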
@ -3675,7 +3721,8 @@ type Endpoints struct {
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
// +optional
Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
@ -3962,10 +4009,12 @@ const (
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
NodePIDPressure NodeConditionType = "PIDPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeConfigOK indicates whether the kubelet is correctly configured
NodeConfigOK NodeConditionType = "ConfigOK"
// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
// NodeCondition contains condition information for a node.
@ -4733,6 +4782,8 @@ const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
// Default resource requests prefix
DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota

View File

@ -117,11 +117,15 @@ func (Binding) SwaggerDoc() map[string]string {
}
var map_CSIPersistentVolumeSource = map[string]string{
"": "Represents storage that is managed by an external CSI volume driver",
"driver": "Driver is the name of the driver to use for this volume. Required.",
"volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugins CreateVolume to refer to the volume on all subsequent calls. Required.",
"readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"": "Represents storage that is managed by an external CSI volume driver (Beta feature)",
"driver": "Driver is the name of the driver to use for this volume. Required.",
"volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugins CreateVolume to refer to the volume on all subsequent calls. Required.",
"readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"volumeAttributes": "Attributes of the volume to publish.",
"controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
"nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
"nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
}
func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1276,7 +1280,7 @@ var map_PersistentVolumeSource = map[string]string{
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
"local": "Local represents directly-attached storage with node affinity",
"storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md",
"csi": "CSI represents storage that handled by an external CSI driver",
"csi": "CSI represents storage that handled by an external CSI driver (Beta feature).",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1288,10 +1292,11 @@ var map_PersistentVolumeSpec = map[string]string{
"capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
"accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
"claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
"mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.",
"nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
}
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
@ -1496,7 +1501,7 @@ var map_PodSpec = map[string]string{
"restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
"activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
"dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.",
"dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
"nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
"serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
@ -1505,6 +1510,7 @@ var map_PodSpec = map[string]string{
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.",
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
"imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
@ -1515,7 +1521,7 @@ var map_PodSpec = map[string]string{
"hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
"priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
"priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
"dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.",
"dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@ -2176,6 +2182,15 @@ func (VolumeMount) SwaggerDoc() map[string]string {
return map_VolumeMount
}
var map_VolumeNodeAffinity = map[string]string{
"": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.",
"required": "Required specifies hard node constraints that must be met.",
}
func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
return map_VolumeNodeAffinity
}
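These generated maps back the SwaggerDoc() helpers, which return per-field documentation keyed by JSON field name, with the empty key holding the type-level description. A small sketch of reading them:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	// Look up the description of the new nodeAffinity field on
	// PersistentVolumeSpec straight from the generated documentation map.
	docs := v1.PersistentVolumeSpec{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["nodeAffinity"])
}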
var map_VolumeProjection = map[string]string{
"": "Projection that may be projected along with other supported volume types",
"secret": "information about the secret data to project",

View File

@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
resource "k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
@ -241,14 +240,47 @@ func (in *Binding) DeepCopy() *Binding {
func (in *Binding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
*out = *in
if in.VolumeAttributes != nil {
in, out := &in.VolumeAttributes, &out.VolumeAttributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ControllerPublishSecretRef != nil {
in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodeStageSecretRef != nil {
in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
@ -432,9 +464,8 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus {
func (in *ComponentStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -466,9 +497,8 @@ func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -512,9 +542,8 @@ func (in *ConfigMap) DeepCopy() *ConfigMap {
func (in *ConfigMap) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -598,9 +627,8 @@ func (in *ConfigMapList) DeepCopy() *ConfigMapList {
func (in *ConfigMapList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -994,9 +1022,8 @@ func (in *DeleteOptions) DeepCopy() *DeleteOptions {
func (in *DeleteOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1105,8 +1132,8 @@ func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -1236,9 +1263,8 @@ func (in *Endpoints) DeepCopy() *Endpoints {
func (in *Endpoints) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1270,9 +1296,8 @@ func (in *EndpointsList) DeepCopy() *EndpointsList {
func (in *EndpointsList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1431,9 +1456,8 @@ func (in *Event) DeepCopy() *Event {
func (in *Event) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1465,9 +1489,8 @@ func (in *EventList) DeepCopy() *EventList {
func (in *EventList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1974,9 +1997,8 @@ func (in *LimitRange) DeepCopy() *LimitRange {
func (in *LimitRange) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2059,9 +2081,8 @@ func (in *LimitRangeList) DeepCopy() *LimitRangeList {
func (in *LimitRangeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2116,9 +2137,8 @@ func (in *List) DeepCopy() *List {
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2151,9 +2171,8 @@ func (in *ListOptions) DeepCopy() *ListOptions {
func (in *ListOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2265,9 +2284,8 @@ func (in *Namespace) DeepCopy() *Namespace {
func (in *Namespace) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2299,9 +2317,8 @@ func (in *NamespaceList) DeepCopy() *NamespaceList {
func (in *NamespaceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2365,9 +2382,8 @@ func (in *Node) DeepCopy() *Node {
func (in *Node) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2466,9 +2482,8 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
func (in *NodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2517,9 +2532,8 @@ func (in *NodeList) DeepCopy() *NodeList {
func (in *NodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2543,9 +2557,8 @@ func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2772,8 +2785,7 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.DeletionGracePeriodSeconds != nil {
@ -2853,9 +2865,8 @@ func (in *ObjectReference) DeepCopy() *ObjectReference {
func (in *ObjectReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2882,9 +2893,8 @@ func (in *PersistentVolume) DeepCopy() *PersistentVolume {
func (in *PersistentVolume) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2911,9 +2921,8 @@ func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2963,9 +2972,8 @@ func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3097,9 +3105,8 @@ func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3300,7 +3307,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
*out = nil
} else {
*out = new(CSIPersistentVolumeSource)
**out = **in
(*in).DeepCopyInto(*out)
}
}
return
@ -3355,6 +3362,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
**out = **in
}
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
if *in == nil {
*out = nil
} else {
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -3424,9 +3440,8 @@ func (in *Pod) DeepCopy() *Pod {
func (in *Pod) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3540,9 +3555,8 @@ func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3647,9 +3661,8 @@ func (in *PodExecOptions) DeepCopy() *PodExecOptions {
func (in *PodExecOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3681,9 +3694,8 @@ func (in *PodList) DeepCopy() *PodList {
func (in *PodList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3704,8 +3716,7 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.TailLines != nil {
@ -3743,9 +3754,8 @@ func (in *PodLogOptions) DeepCopy() *PodLogOptions {
func (in *PodLogOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3774,9 +3784,8 @@ func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3800,9 +3809,8 @@ func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3945,6 +3953,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
**out = **in
}
}
if in.ShareProcessNamespace != nil {
in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
if *in == nil {
@ -4028,8 +4045,7 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.InitContainerStatuses != nil {
@ -4082,9 +4098,8 @@ func (in *PodStatusResult) DeepCopy() *PodStatusResult {
func (in *PodStatusResult) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4110,9 +4125,8 @@ func (in *PodTemplate) DeepCopy() *PodTemplate {
func (in *PodTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4144,9 +4158,8 @@ func (in *PodTemplateList) DeepCopy() *PodTemplateList {
func (in *PodTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4395,9 +4408,8 @@ func (in *RangeAllocation) DeepCopy() *RangeAllocation {
func (in *RangeAllocation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4424,9 +4436,8 @@ func (in *ReplicationController) DeepCopy() *ReplicationController {
func (in *ReplicationController) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4475,9 +4486,8 @@ func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4561,6 +4571,28 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceList) DeepCopyInto(out *ResourceList) {
{
in := &in
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
func (in ResourceList) DeepCopy() ResourceList {
if in == nil {
return nil
}
out := new(ResourceList)
in.DeepCopyInto(out)
return *out
}
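ResourceList now has its own generated deep-copy helpers; each resource.Quantity value is copied with DeepCopy rather than plain map assignment, so the copy is fully independent of the original. A brief usage sketch with made-up requests:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}

	// DeepCopy returns an independent map with independent Quantity values,
	// so adjusting the copy does not affect the original.
	cp := limits.DeepCopy()
	q := cp[v1.ResourceCPU]
	q.Add(resource.MustParse("500m"))
	cp[v1.ResourceCPU] = q

	orig := limits[v1.ResourceCPU]
	fmt.Println(orig.String()) // still 500m: only the copy was modified
}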
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
*out = *in
@ -4585,9 +4617,8 @@ func (in *ResourceQuota) DeepCopy() *ResourceQuota {
func (in *ResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4619,9 +4650,8 @@ func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4819,9 +4849,8 @@ func (in *Secret) DeepCopy() *Secret {
func (in *Secret) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4905,9 +4934,8 @@ func (in *SecretList) DeepCopy() *SecretList {
func (in *SecretList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5101,9 +5129,8 @@ func (in *SerializedReference) DeepCopy() *SerializedReference {
func (in *SerializedReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5130,9 +5157,8 @@ func (in *Service) DeepCopy() *Service {
func (in *Service) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5176,9 +5202,8 @@ func (in *ServiceAccount) DeepCopy() *ServiceAccount {
func (in *ServiceAccount) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5210,9 +5235,8 @@ func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5244,9 +5268,8 @@ func (in *ServiceList) DeepCopy() *ServiceList {
func (in *ServiceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5287,9 +5310,8 @@ func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5472,8 +5494,7 @@ func (in *Taint) DeepCopyInto(out *Taint) {
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return
@ -5572,6 +5593,31 @@ func (in *VolumeMount) DeepCopy() *VolumeMount {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
*out = *in
if in.Required != nil {
in, out := &in.Required, &out.Required
if *in == nil {
*out = nil
} else {
*out = new(NodeSelector)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
if in == nil {
return nil
}
out := new(VolumeNodeAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = *in

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -70,9 +70,8 @@ func (in *Event) DeepCopy() *Event {
func (in *Event) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -104,9 +103,8 @@ func (in *EventList) DeepCopy() *EventList {
func (in *EventList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -163,9 +163,8 @@ func (in *DaemonSet) DeepCopy() *DaemonSet {
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -214,9 +213,8 @@ func (in *DaemonSetList) DeepCopy() *DaemonSetList {
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -336,9 +334,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -388,9 +385,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -422,9 +418,8 @@ func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -683,9 +678,8 @@ func (in *Ingress) DeepCopy() *Ingress {
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -734,9 +728,8 @@ func (in *IngressList) DeepCopy() *IngressList {
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -881,9 +874,8 @@ func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -975,9 +967,8 @@ func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1116,9 +1107,8 @@ func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1150,9 +1140,8 @@ func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1252,9 +1241,8 @@ func (in *ReplicaSet) DeepCopy() *ReplicaSet {
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1303,9 +1291,8 @@ func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1387,9 +1374,8 @@ func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1537,9 +1523,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -71,9 +71,8 @@ func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -165,9 +164,8 @@ func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

File diff suppressed because it is too large

View File

@ -30,6 +30,25 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1beta1";
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
message AllowedFlexVolume {
// Driver is the name of the Flexvolume driver.
optional string driver = 1;
}
// defines the host volume conditions that will be enabled by a policy
// for pods to use. It requires the path prefix to be defined.
message AllowedHostPath {
// is the path prefix that the host volume must match.
// It does not support `*`.
// Trailing slashes are trimmed when validating the path prefix with a host path.
//
// Examples:
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
// `/foo` would not allow `/food` or `/etc/foo`
optional string pathPrefix = 1;
}
// Eviction evicts a pod from its node subject to certain policies and safety constraints.
// This is a subresource of Pod. A request to cause such an eviction is
// created by POSTing to .../pods/<pod name>/evictions.
@ -41,6 +60,37 @@ message Eviction {
optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
}
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
message FSGroupStrategyOptions {
// Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
// +optional
optional string rule = 1;
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end.
// +optional
repeated IDRange ranges = 2;
}
// Host Port Range defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
message HostPortRange {
// min is the start of the range, inclusive.
optional int32 min = 1;
// max is the end of the range, inclusive.
optional int32 max = 2;
}
// ID Range provides a min/max of an allowed range of IDs.
message IDRange {
// Min is the start of the range, inclusive.
optional int64 min = 1;
// Max is the end of the range, inclusive.
optional int64 max = 2;
}
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
message PodDisruptionBudget {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@ -112,3 +162,146 @@ message PodDisruptionBudgetStatus {
optional int32 expectedPods = 6;
}
// Pod Security Policy governs the ability to make requests that affect the Security Context
// that will be applied to a pod and container.
message PodSecurityPolicy {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec defines the policy enforced.
// +optional
optional PodSecurityPolicySpec spec = 2;
}
// Pod Security Policy List is a list of PodSecurityPolicy objects.
message PodSecurityPolicyList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is a list of schema objects.
repeated PodSecurityPolicy items = 2;
}
// Pod Security Policy Spec defines the policy enforced.
message PodSecurityPolicySpec {
// privileged determines if a pod can request to be run as privileged.
// +optional
optional bool privileged = 1;
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capability in both
// DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly
// allowed, and need not be included in the AllowedCapabilities list.
// +optional
repeated string defaultAddCapabilities = 2;
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +optional
repeated string requiredDropCapabilities = 3;
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field may be added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// +optional
repeated string allowedCapabilities = 4;
// volumes is a white list of allowed volume plugins. Empty indicates that all plugins
// may be used.
// +optional
repeated string volumes = 5;
// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
// +optional
optional bool hostNetwork = 6;
// hostPorts determines which host port ranges are allowed to be exposed.
// +optional
repeated HostPortRange hostPorts = 7;
// hostPID determines if the policy allows the use of HostPID in the pod spec.
// +optional
optional bool hostPID = 8;
// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
// +optional
optional bool hostIPC = 9;
// seLinux is the strategy that will dictate the allowable labels that may be set.
optional SELinuxStrategyOptions seLinux = 10;
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
optional RunAsUserStrategyOptions runAsUser = 11;
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
optional FSGroupStrategyOptions fsGroup = 13;
// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
// system. If the container specifically requests to run with a non-read only root file system
// the PSP should deny the pod.
// If set to false the container may run with a read only root file system if it wishes but it
// will not be forced to.
// +optional
optional bool readOnlyRootFilesystem = 14;
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
optional bool defaultAllowPrivilegeEscalation = 15;
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
optional bool allowPrivilegeEscalation = 16;
// is a white list of allowed host paths. Empty indicates that all host paths may be used.
// +optional
repeated AllowedHostPath allowedHostPaths = 17;
// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
repeated AllowedFlexVolume allowedFlexVolumes = 18;
}
// RunAsUser Strategy Options defines the strategy type and any options used to create the strategy.
message RunAsUserStrategyOptions {
// Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
optional string rule = 1;
// Ranges are the allowed ranges of uids that may be used.
// +optional
repeated IDRange ranges = 2;
}
// SELinux Strategy Options defines the strategy type and any options used to create the strategy.
message SELinuxStrategyOptions {
// type is the strategy that will dictate the allowable labels that may be set.
optional string rule = 1;
// seLinuxOptions required to run as; required for MustRunAs
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
}
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
message SupplementalGroupsStrategyOptions {
// Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
// +optional
optional string rule = 1;
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end.
// +optional
repeated IDRange ranges = 2;
}

View File

@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PodDisruptionBudget{},
&PodDisruptionBudgetList{},
&PodSecurityPolicy{},
&PodSecurityPolicyList{},
&Eviction{},
)
// Add the watch version that applies
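Registering PodSecurityPolicy and PodSecurityPolicyList in addKnownTypes is what lets the API machinery encode, decode, and watch the new kinds. A rough sketch of what that enables, assuming the generated AddToScheme helper exported by this package:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme invokes addKnownTypes, so the new kinds become recognizable.
	if err := policyv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&policyv1beta1.PodSecurityPolicy{})
	fmt.Println(gvks, err) // [policy/v1beta1, Kind=PodSecurityPolicy] <nil>
}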

View File

@ -17,6 +17,7 @@ limitations under the License.
package v1beta1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@ -113,3 +114,255 @@ type Eviction struct {
// DeleteOptions may be provided
DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Pod Security Policy governs the ability to make requests that affect the Security Context
// that will be applied to a pod and container.
type PodSecurityPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec defines the policy enforced.
// +optional
Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// Pod Security Policy Spec defines the policy enforced.
type PodSecurityPolicySpec struct {
// privileged determines if a pod can request to be run as privileged.
// +optional
Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"`
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capability in both
// DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly
// allowed, and need not be included in the AllowedCapabilities list.
// +optional
DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"`
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +optional
RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"`
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field may be added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// +optional
AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"`
// volumes is a white list of allowed volume plugins. Empty indicates that all plugins
// may be used.
// +optional
Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"`
// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
// +optional
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"`
// hostPorts determines which host port ranges are allowed to be exposed.
// +optional
HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"`
// hostPID determines if the policy allows the use of HostPID in the pod spec.
// +optional
HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"`
// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
// +optional
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"`
// seLinux is the strategy that will dictate the allowable labels that may be set.
SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"`
// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
// system. If the container specifically requests to run with a non-read only root file system
// the PSP should deny the pod.
// If set to false the container may run with a read only root file system if it wishes but it
// will not be forced to.
// +optional
ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"`
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"`
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"`
// is a white list of allowed host paths. Empty indicates that all host paths may be used.
// +optional
AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"`
}
// defines the host volume conditions that will be enabled by a policy
// for pods to use. It requires the path prefix to be defined.
type AllowedHostPath struct {
// is the path prefix that the host volume must match.
// It does not support `*`.
// Trailing slashes are trimmed when validating the path prefix with a host path.
//
// Examples:
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
// `/foo` would not allow `/food` or `/etc/foo`
PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"`
}
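The pathPrefix comments describe matching on path boundaries: `/foo` admits `/foo`, `/foo/`, and `/foo/bar`, but not `/food` or `/etc/foo`. A minimal sketch of such a check (illustrative only; the actual enforcement lives in the PodSecurityPolicy admission code, not in this file):

package main

import (
	"fmt"
	"strings"
)

// hasPathPrefix reports whether path equals prefix or is nested beneath it.
// Trailing slashes are ignored, matching the documented examples.
func hasPathPrefix(path, prefix string) bool {
	p := strings.TrimSuffix(path, "/")
	pre := strings.TrimSuffix(prefix, "/")
	return p == pre || strings.HasPrefix(p, pre+"/")
}

func main() {
	fmt.Println(hasPathPrefix("/foo/bar", "/foo")) // true
	fmt.Println(hasPathPrefix("/food", "/foo"))    // false
}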
// FS Type gives strong typing to different file systems that are used by volumes.
type FSType string
var (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"
HostPath FSType = "hostPath"
EmptyDir FSType = "emptyDir"
GCEPersistentDisk FSType = "gcePersistentDisk"
AWSElasticBlockStore FSType = "awsElasticBlockStore"
GitRepo FSType = "gitRepo"
Secret FSType = "secret"
NFS FSType = "nfs"
ISCSI FSType = "iscsi"
Glusterfs FSType = "glusterfs"
PersistentVolumeClaim FSType = "persistentVolumeClaim"
RBD FSType = "rbd"
Cinder FSType = "cinder"
CephFS FSType = "cephFS"
DownwardAPI FSType = "downwardAPI"
FC FSType = "fc"
ConfigMap FSType = "configMap"
Quobyte FSType = "quobyte"
AzureDisk FSType = "azureDisk"
All FSType = "*"
)
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
// Driver is the name of the Flexvolume driver.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
}
// Host Port Range defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
type HostPortRange struct {
// min is the start of the range, inclusive.
Min int32 `json:"min" protobuf:"varint,1,opt,name=min"`
// max is the end of the range, inclusive.
Max int32 `json:"max" protobuf:"varint,2,opt,name=max"`
}
// SELinux Strategy Options defines the strategy type and any options used to create the strategy.
type SELinuxStrategyOptions struct {
// type is the strategy that will dictate the allowable labels that may be set.
Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
// seLinuxOptions required to run as; required for MustRunAs
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
}
// SELinuxStrategy denotes strategy types for generating SELinux options for a
// Security Context.
type SELinuxStrategy string
const (
// container must have SELinux labels of X applied.
SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
// container may make requests for any SELinux context labels.
SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
)
// RunAsUser Strategy Options defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
// Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"`
// Ranges are the allowed ranges of uids that may be used.
// +optional
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
// ID Range provides a min/max of an allowed range of IDs.
type IDRange struct {
// Min is the start of the range, inclusive.
Min int64 `json:"min" protobuf:"varint,1,opt,name=min"`
// Max is the end of the range, inclusive.
Max int64 `json:"max" protobuf:"varint,2,opt,name=max"`
}
// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
// Security Context.
type RunAsUserStrategy string
const (
// container must run as a particular uid.
RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
// container must run as a non-root uid
RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
// container may make requests for any uid.
RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
)
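The three RunAsUser rules read naturally as a small decision procedure: RunAsAny accepts everything, MustRunAsNonRoot rejects UID 0, and MustRunAs checks the configured IDRange list. A sketch of that logic with local stand-in types (not the API structs, and not the real validation code):

package main

import "fmt"

// idRange and runAsUserOptions are local stand-ins for IDRange and
// RunAsUserStrategyOptions, kept minimal for the example.
type idRange struct{ Min, Max int64 }

type runAsUserOptions struct {
	Rule   string
	Ranges []idRange
}

// uidAllowed mirrors the documented semantics of the three rules.
func uidAllowed(opts runAsUserOptions, uid int64) bool {
	switch opts.Rule {
	case "RunAsAny":
		return true
	case "MustRunAsNonRoot":
		return uid != 0
	case "MustRunAs":
		for _, r := range opts.Ranges {
			if uid >= r.Min && uid <= r.Max {
				return true
			}
		}
	}
	return false
}

func main() {
	opts := runAsUserOptions{Rule: "MustRunAs", Ranges: []idRange{{Min: 1000, Max: 2000}}}
	fmt.Println(uidAllowed(opts, 1500), uidAllowed(opts, 0)) // true false
}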
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
// Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
// +optional
Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end.
// +optional
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
// SecurityContext
type FSGroupStrategyType string
const (
// container must have FSGroup of X applied.
FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
// container may make requests for any FSGroup labels.
FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
)
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
type SupplementalGroupsStrategyOptions struct {
// Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
// +optional
Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end.
// +optional
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
// groups for a SecurityContext.
type SupplementalGroupsStrategyType string
const (
// container must run as a particular gid.
SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
// container may make requests for any gid.
SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Pod Security Policy List is a list of PodSecurityPolicy objects.
type PodSecurityPolicyList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of schema objects.
Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
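Putting the new types together, a minimal PodSecurityPolicy literal might look like the following. The import path k8s.io/api/policy/v1beta1 is assumed from where these hunks live; the four strategy fields carry no omitempty tag, so a usable policy spells them out explicitly:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	psp := policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted"},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:         false,
			SELinux:            policyv1beta1.SELinuxStrategyOptions{Rule: policyv1beta1.SELinuxStrategyRunAsAny},
			RunAsUser:          policyv1beta1.RunAsUserStrategyOptions{Rule: policyv1beta1.RunAsUserStrategyMustRunAsNonRoot},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny},
			FSGroup:            policyv1beta1.FSGroupStrategyOptions{Rule: policyv1beta1.FSGroupStrategyRunAsAny},
			// Volumes is a whitelist; listing a few common plugins instead of All.
			Volumes: []policyv1beta1.FSType{policyv1beta1.ConfigMap, policyv1beta1.Secret, policyv1beta1.EmptyDir},
		},
	}
	fmt.Println(psp.Name, psp.Spec.RunAsUser.Rule) // restricted MustRunAsNonRoot
}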

View File

@ -27,6 +27,24 @@ package v1beta1
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_AllowedFlexVolume = map[string]string{
"": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
"driver": "Driver is the name of the Flexvolume driver.",
}
func (AllowedFlexVolume) SwaggerDoc() map[string]string {
return map_AllowedFlexVolume
}
var map_AllowedHostPath = map[string]string{
"": "defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.",
"pathPrefix": "is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`",
}
func (AllowedHostPath) SwaggerDoc() map[string]string {
return map_AllowedHostPath
}
var map_Eviction = map[string]string{
"": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.",
"metadata": "ObjectMeta describes the pod that is being evicted.",
@ -37,6 +55,36 @@ func (Eviction) SwaggerDoc() map[string]string {
return map_Eviction
}
var map_FSGroupStrategyOptions = map[string]string{
"": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.",
"rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.",
"ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.",
}
func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
return map_FSGroupStrategyOptions
}
var map_HostPortRange = map[string]string{
"": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.",
"min": "min is the start of the range, inclusive.",
"max": "max is the end of the range, inclusive.",
}
func (HostPortRange) SwaggerDoc() map[string]string {
return map_HostPortRange
}
var map_IDRange = map[string]string{
"": "ID Range provides a min/max of an allowed range of IDs.",
"min": "Min is the start of the range, inclusive.",
"max": "Max is the end of the range, inclusive.",
}
func (IDRange) SwaggerDoc() map[string]string {
return map_IDRange
}
var map_PodDisruptionBudget = map[string]string{
"": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
"spec": "Specification of the desired behavior of the PodDisruptionBudget.",
@ -80,4 +128,80 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {
return map_PodDisruptionBudgetStatus
}
var map_PodSecurityPolicy = map[string]string{
"": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "spec defines the policy enforced.",
}
func (PodSecurityPolicy) SwaggerDoc() map[string]string {
return map_PodSecurityPolicy
}
var map_PodSecurityPolicyList = map[string]string{
"": "Pod Security Policy List is a list of PodSecurityPolicy objects.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"items": "Items is a list of schema objects.",
}
func (PodSecurityPolicyList) SwaggerDoc() map[string]string {
return map_PodSecurityPolicyList
}
var map_PodSecurityPolicySpec = map[string]string{
"": "Pod Security Policy Spec defines the policy enforced.",
"privileged": "privileged determines if a pod can request to be run as privileged.",
"defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the AllowedCapabilities list.",
"requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.",
"allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.",
"volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.",
"hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
"hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.",
"hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.",
"hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
"seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.",
"runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
"supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
"fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
"readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
"defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
"allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
"allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.",
"allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.",
}
func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySpec
}
var map_RunAsUserStrategyOptions = map[string]string{
"": "Run A sUser Strategy Options defines the strategy type and any options used to create the strategy.",
"rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
"ranges": "Ranges are the allowed ranges of uids that may be used.",
}
func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
return map_RunAsUserStrategyOptions
}
var map_SELinuxStrategyOptions = map[string]string{
"": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.",
"rule": "type is the strategy that will dictate the allowable labels that may be set.",
"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
}
func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {
return map_SELinuxStrategyOptions
}
var map_SupplementalGroupsStrategyOptions = map[string]string{
"": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
"rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
"ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.",
}
func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
return map_SupplementalGroupsStrategyOptions
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@ -16,16 +16,49 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
if in == nil {
return nil
}
out := new(AllowedFlexVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
if in == nil {
return nil
}
out := new(AllowedHostPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Eviction) DeepCopyInto(out *Eviction) {
*out = *in
@ -57,9 +90,61 @@ func (in *Eviction) DeepCopy() *Eviction {
func (in *Eviction) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
	} else {
		return nil
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
if in == nil {
return nil
}
out := new(FSGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
func (in *HostPortRange) DeepCopy() *HostPortRange {
if in == nil {
return nil
}
out := new(HostPortRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IDRange) DeepCopyInto(out *IDRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
func (in *IDRange) DeepCopy() *IDRange {
if in == nil {
return nil
}
out := new(IDRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -86,9 +171,8 @@ func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget {
func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -120,9 +204,8 @@ func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList {
func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -190,3 +273,203 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
if in == nil {
return nil
}
out := new(PodSecurityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodSecurityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
if in == nil {
return nil
}
out := new(PodSecurityPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = *in
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]FSType, len(*in))
copy(*out, *in)
}
if in.HostPorts != nil {
in, out := &in.HostPorts, &out.HostPorts
*out = make([]HostPortRange, len(*in))
copy(*out, *in)
}
in.SELinux.DeepCopyInto(&out.SELinux)
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
in.FSGroup.DeepCopyInto(&out.FSGroup)
if in.DefaultAllowPrivilegeEscalation != nil {
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.AllowPrivilegeEscalation != nil {
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.AllowedHostPaths != nil {
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
*out = make([]AllowedHostPath, len(*in))
copy(*out, *in)
}
if in.AllowedFlexVolumes != nil {
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
*out = make([]AllowedFlexVolume, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsUserStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
if *in == nil {
*out = nil
} else {
*out = new(core_v1.SELinuxOptions)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
if in == nil {
return nil
}
out := new(SELinuxStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
if in == nil {
return nil
}
out := new(SupplementalGroupsStrategyOptions)
in.DeepCopyInto(out)
return out
}

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -86,9 +86,8 @@ func (in *ClusterRole) DeepCopy() *ClusterRole {
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -119,9 +118,8 @@ func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -153,9 +151,8 @@ func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -187,9 +184,8 @@ func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -262,9 +258,8 @@ func (in *Role) DeepCopy() *Role {
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -295,9 +290,8 @@ func (in *RoleBinding) DeepCopy() *RoleBinding {
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -329,9 +323,8 @@ func (in *RoleBindingList) DeepCopy() *RoleBindingList {
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -363,9 +356,8 @@ func (in *RoleList) DeepCopy() *RoleList {
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -86,9 +86,8 @@ func (in *ClusterRole) DeepCopy() *ClusterRole {
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -119,9 +118,8 @@ func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -153,9 +151,8 @@ func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -187,9 +184,8 @@ func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -262,9 +258,8 @@ func (in *Role) DeepCopy() *Role {
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -295,9 +290,8 @@ func (in *RoleBinding) DeepCopy() *RoleBinding {
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -329,9 +323,8 @@ func (in *RoleBindingList) DeepCopy() *RoleBindingList {
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -363,9 +356,8 @@ func (in *RoleList) DeepCopy() *RoleList {
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -86,9 +86,8 @@ func (in *ClusterRole) DeepCopy() *ClusterRole {
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -119,9 +118,8 @@ func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -153,9 +151,8 @@ func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -187,9 +184,8 @@ func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -262,9 +258,8 @@ func (in *Role) DeepCopy() *Role {
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -295,9 +290,8 @@ func (in *RoleBinding) DeepCopy() *RoleBinding {
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -329,9 +323,8 @@ func (in *RoleBindingList) DeepCopy() *RoleBindingList {
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -363,9 +356,8 @@ func (in *RoleList) DeepCopy() *RoleList {
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -43,6 +43,9 @@ message PriorityClass {
// globalDefault specifies whether this PriorityClass should be considered as
// the default priority for pods that do not have any priority class.
// Only one PriorityClass can be marked as `globalDefault`. However, if more than
// one PriorityClass exists with its `globalDefault` field set to true,
// the smallest value among such global-default PriorityClasses will be used as the default priority.
// +optional
optional bool globalDefault = 3;


@ -39,6 +39,9 @@ type PriorityClass struct {
// globalDefault specifies whether this PriorityClass should be considered as
// the default priority for pods that do not have any priority class.
// Only one PriorityClass can be marked as `globalDefault`. However, if more than
// one PriorityClass exists with its `globalDefault` field set to true,
// the smallest value among such global-default PriorityClasses will be used as the default priority.
// +optional
GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"`
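For illustration only (not part of this change): a minimal sketch of a global-default PriorityClass built against the v1alpha1 scheduling types above. The object name, value, and import paths are assumptions.

package example

import (
    schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ClusterDefaultPriorityClass sketches a PriorityClass marked as the global default.
// If several classes set GlobalDefault, the smallest Value among them becomes the default priority.
func ClusterDefaultPriorityClass() schedulingv1alpha1.PriorityClass {
    return schedulingv1alpha1.PriorityClass{
        ObjectMeta:    metav1.ObjectMeta{Name: "cluster-default"},
        Value:         1000,
        GlobalDefault: true,
        Description:   "Priority assigned to pods that do not set priorityClassName",
    }
}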


@ -31,7 +31,7 @@ var map_PriorityClass = map[string]string{
"": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
"globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class.",
"globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
"description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
}


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -46,9 +46,8 @@ func (in *PriorityClass) DeepCopy() *PriorityClass {
func (in *PriorityClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -80,7 +79,6 @@ func (in *PriorityClassList) DeepCopy() *PriorityClassList {
func (in *PriorityClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -48,9 +48,8 @@ func (in *PodPreset) DeepCopy() *PodPreset {
func (in *PodPreset) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -82,9 +81,8 @@ func (in *PodPresetList) DeepCopy() *PodPresetList {
func (in *PodPresetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@ -86,9 +86,8 @@ func (in *StorageClass) DeepCopy() *StorageClass {
func (in *StorageClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -120,7 +119,6 @@ func (in *StorageClassList) DeepCopy() *StorageClassList {
func (in *StorageClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -48,9 +48,8 @@ func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -82,9 +81,8 @@ func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
@ -86,9 +86,8 @@ func (in *StorageClass) DeepCopy() *StorageClass {
func (in *StorageClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -120,9 +119,8 @@ func (in *StorageClassList) DeepCopy() *StorageClassList {
func (in *StorageClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -149,9 +147,8 @@ func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -183,9 +180,8 @@ func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -33,6 +33,12 @@ const (
//
// CustomResourceValidation is a list of validation methods for CustomResources
CustomResourceValidation utilfeature.Feature = "CustomResourceValidation"
// owner: @sttts, @nikhita
// alpha: v1.10
//
// CustomResourceSubresources defines the subresources for CustomResources
CustomResourceSubresources utilfeature.Feature = "CustomResourceSubresources"
)
func init() {
@ -43,5 +49,6 @@ func init() {
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{
CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
CustomResourceSubresources: {Default: false, PreRelease: utilfeature.Alpha},
}
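A minimal usage sketch (not taken from this diff): how a caller could gate behavior on the new alpha feature, assuming the constant lives in the apiextensions-apiserver features package and the standard utilfeature.DefaultFeatureGate helper is used.

package example

import (
    apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// serveSubresources reports whether the /status and /scale subresources
// for CustomResources should be wired up, based on the alpha gate above.
func serveSubresources() bool {
    return utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources)
}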

vendor/k8s.io/kubernetes/README.md

@ -68,10 +68,10 @@ That said, if you have questions, reach out to us
[announcement]: https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container
[Borg]: https://research.google.com/pubs/pub43438.html
[CNCF]: https://www.cncf.io/about
[communication]: https://github.com/kubernetes/community/blob/master/communication.md
[community repository]: https://github.com/kubernetes/community
[communication]: https://git.k8s.io/community/communication
[community repository]: https://git.k8s.io/community
[containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/
[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel#readme
[developer's documentation]: https://git.k8s.io/community/contributors/devel#readme
[Docker environment]: https://docs.docker.com/engine
[Go environment]: https://golang.org/doc/install
[GoDoc]: https://godoc.org/k8s.io/kubernetes


@ -111,6 +111,12 @@ var (
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
// ExternalMetricSourceType is a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
ExternalMetricSourceType MetricSourceType = "External"
)
// MetricSpec specifies how to scale based on a single metric
@ -136,6 +142,13 @@ type MetricSpec struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricSource
}
// ObjectMetricSource indicates how to scale on a metric describing a
@ -184,6 +197,26 @@ type ResourceMetricSource struct {
TargetAverageValue *resource.Quantity
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSource struct {
// metricName is the name of the metric in question.
MetricName string
// MetricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector
// TargetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
TargetValue *resource.Quantity
// TargetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
TargetAverageValue *resource.Quantity
}
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatus struct {
// ObservedGeneration is the most recent generation observed by this autoscaler.
@ -282,6 +315,13 @@ type MetricStatus struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricStatus
}
// ObjectMetricStatus indicates the current value of a metric describing a
@ -328,6 +368,23 @@ type ResourceMetricStatus struct {
CurrentAverageValue resource.Quantity
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
// MetricName is the name of a metric used for autoscaling in
// metric system.
MetricName string
// MetricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector
// CurrentValue is the current value of the metric (as a quantity)
CurrentValue resource.Quantity
// CurrentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
CurrentAverageValue *resource.Quantity
}
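Purely illustrative (the metric name, selector, and target are invented): a MetricSpec in the internal autoscaling package that scales on an external queue-length metric via the new ExternalMetricSource; the import paths are assumptions.

package example

import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/apis/autoscaling"
)

// externalQueueMetric sketches scaling on a metric reported from outside the cluster.
func externalQueueMetric() autoscaling.MetricSpec {
    return autoscaling.MetricSpec{
        Type: autoscaling.ExternalMetricSourceType,
        External: &autoscaling.ExternalMetricSource{
            MetricName: "queue_messages_ready",
            MetricSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"queue": "worker_tasks"},
            },
            // Target an average of 30 queued messages per pod.
            TargetAverageValue: resource.NewQuantity(30, resource.DecimalSI),
        },
    }
}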
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object


@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package autoscaling
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -42,6 +41,84 @@ func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.TargetValue != nil {
in, out := &in.TargetValue, &out.TargetValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
if in.TargetAverageValue != nil {
in, out := &in.TargetAverageValue, &out.TargetAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
out.CurrentValue = in.CurrentValue.DeepCopy()
if in.CurrentAverageValue != nil {
in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
@ -66,9 +143,8 @@ func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -117,9 +193,8 @@ func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -172,8 +247,7 @@ func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscal
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.CurrentMetrics != nil {
@ -233,6 +307,15 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -276,6 +359,15 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -376,8 +468,8 @@ func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -443,9 +535,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core"
)
@ -146,10 +147,21 @@ func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str))
}
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace.
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
// to avoid confusion with the quota convention;
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name core.ResourceName) bool {
return !IsDefaultNamespaceResource(name)
if IsDefaultNamespaceResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name
nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
return false
}
return true
}
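A quick worked example of the tightened rules (the resource names are invented, and the helper package path is an assumption):

package example

import (
    "fmt"

    "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/core/helper"
)

// classify prints how the helper above treats a few illustrative names.
func classify() {
    for _, name := range []core.ResourceName{
        "example.com/dongle",          // accepted: outside the default namespace, no "requests." prefix
        "requests.example.com/dongle", // rejected: collides with the quota "requests." convention
        "cpu",                         // rejected: default-namespace resource
    } {
        fmt.Println(name, helper.IsExtendedResourceName(name))
    }
}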
// IsDefaultNamespaceResource returns true if the resource name is in the

View File

@ -391,7 +391,7 @@ type PersistentVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource
// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver
// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Beta feature).
// +optional
CSI *CSIPersistentVolumeSource
}
@ -467,6 +467,16 @@ type PersistentVolumeSpec struct {
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
// +optional
NodeAffinity *VolumeNodeAffinity
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
// Required specifies hard node constraints that must be met.
Required *NodeSelector
}
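An illustrative sketch of constraining a PersistentVolume to a set of nodes through the new field; the zone label key and value are assumptions, as is the internal core package import.

package example

import "k8s.io/kubernetes/pkg/apis/core"

// zonalVolumeAffinity restricts a volume to nodes carrying a given zone label.
func zonalVolumeAffinity() *core.VolumeNodeAffinity {
    return &core.VolumeNodeAffinity{
        Required: &core.NodeSelector{
            NodeSelectorTerms: []core.NodeSelectorTerm{{
                MatchExpressions: []core.NodeSelectorRequirement{{
                    Key:      "failure-domain.beta.kubernetes.io/zone",
                    Operator: core.NodeSelectorOpIn,
                    Values:   []string{"us-east-1a"},
                }},
            }},
        },
    }
}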
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
@ -1603,7 +1613,7 @@ type LocalVolumeSource struct {
Path string
}
// Represents storage that is managed by an external CSI volume driver
// Represents storage that is managed by an external CSI volume driver (Beta feature)
type CSIPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
// Required.
@ -1624,6 +1634,34 @@ type CSIPersistentVolumeSource struct {
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
// Attributes of the volume to publish.
// +optional
VolumeAttributes map[string]string
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerPublishSecretRef *SecretReference
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeUnstageVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodeStageSecretRef *SecretReference
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodePublishSecretRef *SecretReference
}
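For orientation only (the driver name, volume handle, and secret names are made up): a CSI persistent volume source populated with the new attributes and secret references from the fields above.

package example

import "k8s.io/kubernetes/pkg/apis/core"

// csiVolumeWithSecrets sketches a CSI source that hands per-call secrets to the driver.
func csiVolumeWithSecrets() *core.CSIPersistentVolumeSource {
    return &core.CSIPersistentVolumeSource{
        Driver:           "csi.example.com",
        VolumeHandle:     "vol-0123456789",
        FSType:           "ext4",
        VolumeAttributes: map[string]string{"tier": "standard"},
        ControllerPublishSecretRef: &core.SecretReference{Name: "csi-controller-secret", Namespace: "kube-system"},
        NodeStageSecretRef:         &core.SecretReference{Name: "csi-node-secret", Namespace: "kube-system"},
        NodePublishSecretRef:       &core.SecretReference{Name: "csi-node-secret", Namespace: "kube-system"},
    }
}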
// ContainerPort represents a network port in a single container
@ -2634,6 +2672,15 @@ type PodSecurityContext struct {
// +k8s:conversion-gen=false
// +optional
HostIPC bool
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool
// The SELinux context to be applied to all containers.
// If unspecified, the container runtime will allocate a random SELinux context for each
// container. May also be set in SecurityContext. If set in
@ -3533,8 +3580,8 @@ const (
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeConfigOK indicates whether the kubelet is correctly configured
NodeConfigOK NodeConditionType = "ConfigOK"
// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
type NodeCondition struct {
@ -4202,6 +4249,8 @@ const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
// Default resource requests prefix
DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota


@ -350,6 +350,10 @@ func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -358,6 +362,9 @@ func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec,
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -377,6 +384,7 @@ func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s con
out.HostPID = in.SecurityContext.HostPID
out.HostNetwork = in.SecurityContext.HostNetwork
out.HostIPC = in.SecurityContext.HostIPC
out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace
}
return nil
@ -401,6 +409,18 @@ func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s con
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
out.SecurityContext.HostIPC = in.HostIPC
out.SecurityContext.ShareProcessNamespace = in.ShareProcessNamespace
return nil
}
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -412,17 +432,7 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
if len(out.Annotations) > 0 {
old := out.Annotations
out.Annotations = make(map[string]string, len(old))
for k, v := range old {
out.Annotations[k] = v
}
delete(out.Annotations, "pod.beta.kubernetes.io/init-containers")
delete(out.Annotations, "pod.alpha.kubernetes.io/init-containers")
delete(out.Annotations, "pod.beta.kubernetes.io/init-container-statuses")
delete(out.Annotations, "pod.alpha.kubernetes.io/init-container-statuses")
}
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -570,3 +580,40 @@ func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error {
}
})
}
var initContainerAnnotations = map[string]bool{
"pod.beta.kubernetes.io/init-containers": true,
"pod.alpha.kubernetes.io/init-containers": true,
"pod.beta.kubernetes.io/init-container-statuses": true,
"pod.alpha.kubernetes.io/init-container-statuses": true,
}
// dropInitContainerAnnotations returns a copy of the annotations with init container annotations removed,
// or the original annotations if no init container annotations were present.
//
// this can be removed once no clients prior to 1.8 are supported, and no kubelets prior to 1.8 can be run
// (we don't support kubelets older than 2 versions skewed from the apiserver, but we don't prevent them, either)
func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]string {
if len(oldAnnotations) == 0 {
return oldAnnotations
}
found := false
for k := range initContainerAnnotations {
if _, ok := oldAnnotations[k]; ok {
found = true
break
}
}
if !found {
return oldAnnotations
}
newAnnotations := make(map[string]string, len(oldAnnotations))
for k, v := range oldAnnotations {
if !initContainerAnnotations[k] {
newAnnotations[k] = v
}
}
return newAnnotations
}
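A small usage sketch (annotation values are placeholders, and this would sit alongside the unexported helper in the same package): the legacy init-container key is dropped while unrelated annotations are returned in a fresh map.

// exampleDropInitContainerAnnotations shows the helper's input/output behavior.
func exampleDropInitContainerAnnotations() map[string]string {
    anns := map[string]string{
        "pod.beta.kubernetes.io/init-containers": "[]",     // legacy key: removed
        "example.com/owner":                      "team-a", // unrelated key: kept
    }
    return dropInitContainerAnnotations(anns) // -> map[example.com/owner:team-a]
}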


@ -26,13 +26,25 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace.
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
// to avoid confusion with the quota convention;
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name v1.ResourceName) bool {
return !IsDefaultNamespaceResource(name)
if IsDefaultNamespaceResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name
nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
return false
}
return true
}
// IsDefaultNamespaceResource returns true if the resource name is in the


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
// Code generated by conversion-gen. DO NOT EDIT.
package v1
@ -406,6 +406,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_core_VolumeDevice_To_v1_VolumeDevice,
Convert_v1_VolumeMount_To_core_VolumeMount,
Convert_core_VolumeMount_To_v1_VolumeMount,
Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity,
Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity,
Convert_v1_VolumeProjection_To_core_VolumeProjection,
Convert_core_VolumeProjection_To_v1_VolumeProjection,
Convert_v1_VolumeSource_To_core_VolumeSource,
@ -620,6 +622,10 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
@ -633,6 +639,10 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
@ -3346,6 +3356,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.Per
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
return nil
}
@ -3365,6 +3376,7 @@ func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.P
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
return nil
}
@ -3430,11 +3442,6 @@ func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scop
return nil
}
// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function.
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
return autoConvert_v1_Pod_To_core_Pod(in, out, s)
}
func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
@ -3777,6 +3784,7 @@ func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSe
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
@ -3851,6 +3859,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(core.PodSecurityContext)
@ -5508,6 +5517,26 @@ func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.Vo
return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s)
}
func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*core.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in, out, s)
}
func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*v1.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in, out, s)
}
func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error {
out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret))
out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by defaulter-gen. Do not edit it manually!
// Code generated by defaulter-gen. DO NOT EDIT.
package v1


@ -1383,6 +1383,9 @@ func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path)
return allErrs
}
if !path.IsAbs(ls.Path) {
allErrs = append(allErrs, field.Invalid(fldPath, ls.Path, "must be an absolute path"))
}
allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...)
return allErrs
}
@ -1449,6 +1452,45 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
if csi.ControllerPublishSecretRef != nil {
if len(csi.ControllerPublishSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.ControllerPublishSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
if csi.NodePublishSecretRef != nil {
if len(csi.NodePublishSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.NodePublishSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
if csi.NodeStageSecretRef != nil {
if len(csi.NodeStageSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.NodeStageSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
return allErrs
}
@ -1497,6 +1539,15 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
nodeAffinitySpecified, errs := validateStorageNodeAffinityAnnotation(pv.ObjectMeta.Annotations, metaPath.Child("annotations"))
allErrs = append(allErrs, errs...)
volumeNodeAffinitySpecified, errs := validateVolumeNodeAffinity(pv.Spec.NodeAffinity, specPath.Child("nodeAffinity"))
allErrs = append(allErrs, errs...)
if nodeAffinitySpecified && volumeNodeAffinitySpecified {
allErrs = append(allErrs, field.Forbidden(specPath.Child("nodeAffinity"), "may not specify both alpha nodeAffinity annotation and nodeAffinity field"))
}
nodeAffinitySpecified = nodeAffinitySpecified || volumeNodeAffinitySpecified
numVolumes := 0
if pv.Spec.HostPath != nil {
if numVolumes > 0 {
@ -1725,6 +1776,13 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.E
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...)
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
// Allow setting NodeAffinity if oldPv NodeAffinity was not set
if oldPv.Spec.NodeAffinity != nil {
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
}
}
return allErrs
}
@ -2917,19 +2975,13 @@ func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
}
if len(spec.PriorityClassName) > 0 {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("priorityClassName"), "Pod priority is disabled by feature-gate"))
} else {
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg))
}
}
}
if spec.Priority != nil && !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("priority"), "Pod priority is disabled by feature-gate"))
}
return allErrs
}
@ -3251,6 +3303,13 @@ func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *
allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
}
}
if securityContext.ShareProcessNamespace != nil {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodShareProcessNamespace) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("shareProcessNamespace"), "Process Namespace Sharing is disabled by PodShareProcessNamespace feature-gate"))
} else if securityContext.HostPID && *securityContext.ShareProcessNamespace {
allErrs = append(allErrs, field.Invalid(fldPath.Child("shareProcessNamespace"), *securityContext.ShareProcessNamespace, "ShareProcessNamespace and HostPID cannot both be enabled"))
}
}
}
return allErrs
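A hedged illustration of the mutual-exclusion rule above: a security context that enables both HostPID and ShareProcessNamespace would be rejected by ValidatePodSecurityContext. The internal core package import and function name are assumptions for the example.

package example

import "k8s.io/kubernetes/pkg/apis/core"

// invalidSharedPIDContext builds a PodSecurityContext that trips the new check:
// HostPID and ShareProcessNamespace cannot both be enabled.
func invalidSharedPIDContext() *core.PodSecurityContext {
    share := true
    return &core.PodSecurityContext{
        HostPID:               true,
        ShareProcessNamespace: &share,
    }
}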
@ -4071,6 +4130,10 @@ func validateContainerResourceName(value string, fldPath *field.Path) field.Erro
if !helper.IsStandardContainerResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
}
} else if !helper.IsDefaultNamespaceResource(core.ResourceName(value)) {
if !helper.IsExtendedResourceName(core.ResourceName(value)) {
return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard"))
}
}
return allErrs
}
@ -4252,7 +4315,8 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList {
}
}
// for GPU and hugepages, the default value and defaultRequest value must match if both are specified
// for GPU, hugepages and other resources that are not allowed to overcommit,
// the default value and defaultRequest value must match if both are specified
if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k)))
}
@ -4942,7 +5006,7 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat
return false, allErrs
}
if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
allErrs = append(allErrs, field.Forbidden(fldPath, "Storage node affinity is disabled by feature-gate"))
}
@ -4958,6 +5022,30 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat
return policySpecified, allErrs
}
// validateVolumeNodeAffinity tests that the PersistentVolume.NodeAffinity has valid data
// returns:
// - true if volumeNodeAffinity is set
// - errorList if there are validation errors
func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) (bool, field.ErrorList) {
allErrs := field.ErrorList{}
if nodeAffinity == nil {
return false, allErrs
}
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
allErrs = append(allErrs, field.Forbidden(fldPath, "Volume node affinity is disabled by feature-gate"))
}
if nodeAffinity.Required != nil {
allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, fldPath.Child("required"))...)
} else {
allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints"))
}
return true, allErrs
}
// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr)


@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package core
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
@ -241,14 +240,47 @@ func (in *Binding) DeepCopy() *Binding {
func (in *Binding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
*out = *in
if in.VolumeAttributes != nil {
in, out := &in.VolumeAttributes, &out.VolumeAttributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ControllerPublishSecretRef != nil {
in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodeStageSecretRef != nil {
in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
@ -432,9 +464,8 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus {
func (in *ComponentStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -466,9 +497,8 @@ func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -512,9 +542,8 @@ func (in *ConfigMap) DeepCopy() *ConfigMap {
func (in *ConfigMap) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -598,9 +627,8 @@ func (in *ConfigMapList) DeepCopy() *ConfigMapList {
func (in *ConfigMapList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -994,9 +1022,8 @@ func (in *DeleteOptions) DeepCopy() *DeleteOptions {
func (in *DeleteOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1105,8 +1132,8 @@ func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -1236,9 +1263,8 @@ func (in *Endpoints) DeepCopy() *Endpoints {
func (in *Endpoints) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1270,9 +1296,8 @@ func (in *EndpointsList) DeepCopy() *EndpointsList {
func (in *EndpointsList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1431,9 +1456,8 @@ func (in *Event) DeepCopy() *Event {
func (in *Event) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1465,9 +1489,8 @@ func (in *EventList) DeepCopy() *EventList {
func (in *EventList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1974,9 +1997,8 @@ func (in *LimitRange) DeepCopy() *LimitRange {
func (in *LimitRange) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2059,9 +2081,8 @@ func (in *LimitRangeList) DeepCopy() *LimitRangeList {
func (in *LimitRangeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2120,9 +2141,8 @@ func (in *List) DeepCopy() *List {
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2165,9 +2185,8 @@ func (in *ListOptions) DeepCopy() *ListOptions {
func (in *ListOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2279,9 +2298,8 @@ func (in *Namespace) DeepCopy() *Namespace {
func (in *Namespace) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2313,9 +2331,8 @@ func (in *NamespaceList) DeepCopy() *NamespaceList {
func (in *NamespaceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2379,9 +2396,8 @@ func (in *Node) DeepCopy() *Node {
func (in *Node) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2480,9 +2496,8 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
func (in *NodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2531,9 +2546,8 @@ func (in *NodeList) DeepCopy() *NodeList {
func (in *NodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2557,9 +2571,8 @@ func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2786,8 +2799,7 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.DeletionGracePeriodSeconds != nil {
@ -2867,9 +2879,8 @@ func (in *ObjectReference) DeepCopy() *ObjectReference {
func (in *ObjectReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2896,9 +2907,8 @@ func (in *PersistentVolume) DeepCopy() *PersistentVolume {
func (in *PersistentVolume) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2925,9 +2935,8 @@ func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2977,9 +2986,8 @@ func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3111,9 +3119,8 @@ func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3314,7 +3321,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
*out = nil
} else {
*out = new(CSIPersistentVolumeSource)
**out = **in
(*in).DeepCopyInto(*out)
}
}
return
@ -3369,6 +3376,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
**out = **in
}
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
if *in == nil {
*out = nil
} else {
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -3438,9 +3454,8 @@ func (in *Pod) DeepCopy() *Pod {
func (in *Pod) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3554,9 +3569,8 @@ func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3661,9 +3675,8 @@ func (in *PodExecOptions) DeepCopy() *PodExecOptions {
func (in *PodExecOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3695,9 +3708,8 @@ func (in *PodList) DeepCopy() *PodList {
func (in *PodList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3718,8 +3730,7 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.TailLines != nil {
@ -3757,9 +3768,8 @@ func (in *PodLogOptions) DeepCopy() *PodLogOptions {
func (in *PodLogOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3788,9 +3798,8 @@ func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3814,14 +3823,22 @@ func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = *in
if in.ShareProcessNamespace != nil {
in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
if *in == nil {
@ -4042,8 +4059,7 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.InitContainerStatuses != nil {
@ -4096,9 +4112,8 @@ func (in *PodStatusResult) DeepCopy() *PodStatusResult {
func (in *PodStatusResult) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4124,9 +4139,8 @@ func (in *PodTemplate) DeepCopy() *PodTemplate {
func (in *PodTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4158,9 +4172,8 @@ func (in *PodTemplateList) DeepCopy() *PodTemplateList {
func (in *PodTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4409,9 +4422,8 @@ func (in *RangeAllocation) DeepCopy() *RangeAllocation {
func (in *RangeAllocation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4438,9 +4450,8 @@ func (in *ReplicationController) DeepCopy() *ReplicationController {
func (in *ReplicationController) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4489,9 +4500,8 @@ func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4566,6 +4576,28 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceList) DeepCopyInto(out *ResourceList) {
{
in := &in
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
func (in ResourceList) DeepCopy() ResourceList {
if in == nil {
return nil
}
out := new(ResourceList)
in.DeepCopyInto(out)
return *out
}
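A short, illustrative sketch of the new ResourceList copy (assuming the usual resource import from k8s.io/apimachinery/pkg/api/resource): every Quantity value is copied, so the clone can be mutated without touching the original map.

// Sketch only: ResourceList is a map type, so DeepCopy is defined on the value.
func exampleResourceListCopy() {
	limits := ResourceList{
		ResourceCPU:    resource.MustParse("500m"),
		ResourceMemory: resource.MustParse("128Mi"),
	}
	clone := limits.DeepCopy()                   // fresh map with copied Quantities
	clone[ResourceCPU] = resource.MustParse("1") // limits is unaffected
	_ = limits
}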
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
*out = *in
@ -4590,9 +4622,8 @@ func (in *ResourceQuota) DeepCopy() *ResourceQuota {
func (in *ResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4624,9 +4655,8 @@ func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4817,9 +4847,8 @@ func (in *Secret) DeepCopy() *Secret {
func (in *Secret) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4903,9 +4932,8 @@ func (in *SecretList) DeepCopy() *SecretList {
func (in *SecretList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5099,9 +5127,8 @@ func (in *SerializedReference) DeepCopy() *SerializedReference {
func (in *SerializedReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5128,9 +5155,8 @@ func (in *Service) DeepCopy() *Service {
func (in *Service) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5174,9 +5200,8 @@ func (in *ServiceAccount) DeepCopy() *ServiceAccount {
func (in *ServiceAccount) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5208,9 +5233,8 @@ func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5242,9 +5266,8 @@ func (in *ServiceList) DeepCopy() *ServiceList {
func (in *ServiceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5285,9 +5308,8 @@ func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5470,8 +5492,7 @@ func (in *Taint) DeepCopyInto(out *Taint) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return
@ -5570,6 +5591,31 @@ func (in *VolumeMount) DeepCopy() *VolumeMount {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
*out = *in
if in.Required != nil {
in, out := &in.Required, &out.Required
if *in == nil {
*out = nil
} else {
*out = new(NodeSelector)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
if in == nil {
return nil
}
out := new(VolumeNodeAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = *in

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package extensions
@ -162,9 +162,8 @@ func (in *DaemonSet) DeepCopy() *DaemonSet {
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -213,9 +212,8 @@ func (in *DaemonSetList) DeepCopy() *DaemonSetList {
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -335,9 +333,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -387,9 +384,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -421,9 +417,8 @@ func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -652,9 +647,8 @@ func (in *Ingress) DeepCopy() *Ingress {
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -703,9 +697,8 @@ func (in *IngressList) DeepCopy() *IngressList {
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -850,9 +843,8 @@ func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -884,9 +876,8 @@ func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -977,9 +968,8 @@ func (in *ReplicaSet) DeepCopy() *ReplicaSet {
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1028,9 +1018,8 @@ func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1103,9 +1092,8 @@ func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package networking
@ -71,9 +71,8 @@ func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -165,9 +164,8 @@ func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

View File

@ -21,7 +21,7 @@ import (
"sync"
"github.com/golang/glog"
appsv1beta1 "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -436,18 +436,18 @@ func NewControllerRevisionControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
var claimed []*appsv1beta1.ControllerRevision
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
}
for _, h := range histories {
@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)

View File

@ -64,10 +64,9 @@ const (
Accelerators utilfeature.Feature = "Accelerators"
// owner: @jiayingz
// alpha: v1.8
// beta: v1.10
//
// Enables support for Device Plugins
// Only Nvidia GPUs are tested as of v1.8.
DevicePlugins utilfeature.Feature = "DevicePlugins"
// owner: @gmarek
@ -191,7 +190,7 @@ const (
CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume"
// owner @MrHohn
// alpha: v1.9
// beta: v1.10
//
// Support configurable pod DNS parameters.
CustomPodDNS utilfeature.Feature = "CustomPodDNS"
@ -203,10 +202,10 @@ const (
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
// alpha: v1.9
// beta: v1.10
//
// Postpone deletion of a PV or a PVC when they are being used
StorageProtection utilfeature.Feature = "StorageProtection"
StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection"
// owner: @aveshagarwal
// alpha: v1.9
@ -238,6 +237,30 @@ const (
// Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature
// gate is present only for backward compatibility, it will be removed in the 1.11 release.
ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes"
// owner: @k82cn
// alpha: v1.10
//
// Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller
NoDaemonSetScheduler utilfeature.Feature = "NoDaemonSetScheduler"
// owner: @mikedanese
// alpha: v1.10
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest utilfeature.Feature = "TokenRequest"
// owner: @Random-Liu
// alpha: v1.10
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation"
// owner: @verult
// beta: v1.10
//
// Enables the regional PD feature on GCE.
GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk"
)
func init() {
@ -253,11 +276,11 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: true, PreRelease: utilfeature.Beta},
TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletServerCertificate: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
PersistentLocalVolumes: {Default: false, PreRelease: utilfeature.Alpha},
PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta},
LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha},
HugePages: {Default: true, PreRelease: utilfeature.Beta},
DebugContainers: {Default: false, PreRelease: utilfeature.Alpha},
@ -270,15 +293,19 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
CPUManager: {Default: true, PreRelease: utilfeature.Beta},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta},
CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta},
CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha},
CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
StorageProtection: {Default: false, PreRelease: utilfeature.Alpha},
StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.Beta},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta},
SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.Beta},
SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha},
HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha},
NoDaemonSetScheduler: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequest: {Default: false, PreRelease: utilfeature.Alpha},
CRIContainerLogRotation: {Default: false, PreRelease: utilfeature.Alpha},
GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -290,7 +317,8 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceSubresources: {Default: false, PreRelease: utilfeature.Alpha},
// features that enable backwards compatibility but are scheduled to be removed
ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated},
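For context, a hedged sketch of how these gates are consumed elsewhere in the tree (the surrounding function is illustrative; the Enabled call is the standard pattern):

// Illustrative consumer of the feature gates defined above (not part of this diff).
package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func devicePluginsEnabled() bool {
	// Beta and on by default as of this change; still overridable with
	// --feature-gates=DevicePlugins=false.
	return utilfeature.DefaultFeatureGate.Enabled(features.DevicePlugins)
}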

View File

@ -1066,7 +1066,7 @@ func (m *PodSandboxStatus) GetAnnotations() map[string]string {
type PodSandboxStatusResponse struct {
// Status of the PodSandbox.
Status *PodSandboxStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the PodSandbox. The key could be abitrary string, and
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -2401,7 +2401,7 @@ func (m *ContainerStatus) GetLogPath() string {
type ContainerStatusResponse struct {
// Status of the container.
Status *ContainerStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the Container. The key could be abitrary string, and
// Info is extra information of the Container. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. pid for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -2870,7 +2870,7 @@ func (m *ImageStatusRequest) GetVerbose() bool {
type ImageStatusResponse struct {
// Status of the image.
Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
// Info is extra information of the Image. The key could be abitrary string, and
// Info is extra information of the Image. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful
// for debug, e.g. image config for oci image based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -3173,7 +3173,7 @@ func (m *StatusRequest) GetVerbose() bool {
type StatusResponse struct {
// Status of the Runtime.
Status *RuntimeStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the Runtime. The key could be abitrary string, and
// Info is extra information of the Runtime. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. plugins used by the container runtime.
// It should only be returned non-empty when Verbose is true.
@ -3739,7 +3739,9 @@ type RuntimeServiceClient interface {
UpdateContainerResources(ctx context.Context, in *UpdateContainerResourcesRequest, opts ...grpc.CallOption) (*UpdateContainerResourcesResponse, error)
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
ReopenContainerLog(ctx context.Context, in *ReopenContainerLogRequest, opts ...grpc.CallOption) (*ReopenContainerLogResponse, error)
// ExecSync runs a command in a container synchronously.
ExecSync(ctx context.Context, in *ExecSyncRequest, opts ...grpc.CallOption) (*ExecSyncResponse, error)
@ -4017,7 +4019,9 @@ type RuntimeServiceServer interface {
UpdateContainerResources(context.Context, *UpdateContainerResourcesRequest) (*UpdateContainerResourcesResponse, error)
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
ReopenContainerLog(context.Context, *ReopenContainerLogRequest) (*ReopenContainerLogResponse, error)
// ExecSync runs a command in a container synchronously.
ExecSync(context.Context, *ExecSyncRequest) (*ExecSyncResponse, error)
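A hedged sketch of a server honoring the clarified ReopenContainerLog contract (everything except the CRI types is hypothetical; CRI-O's actual handler lives in its server package):

// Hypothetical runtime implementation: refuse to reopen logs for stopped containers.
func (s *exampleRuntime) ReopenContainerLog(ctx context.Context, req *ReopenContainerLogRequest) (*ReopenContainerLogResponse, error) {
	c, err := s.getContainer(req.ContainerId) // hypothetical lookup helper
	if err != nil {
		return nil, err
	}
	if !c.isRunning() { // hypothetical state check
		// Per the contract above, once an error is returned no new log
		// file may be created for this container.
		return nil, fmt.Errorf("container %s is not running", req.ContainerId)
	}
	if err := c.reopenLog(); err != nil { // hypothetical reopen of the rotated log path
		return nil, err
	}
	return &ReopenContainerLogResponse{}, nil
}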

View File

@ -66,7 +66,9 @@ service RuntimeService {
rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {}
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {}
// ExecSync runs a command in a container synchronously.
@ -414,7 +416,7 @@ message PodSandboxStatus {
message PodSandboxStatusResponse {
// Status of the PodSandbox.
PodSandboxStatus status = 1;
// Info is extra information of the PodSandbox. The key could be abitrary string, and
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -852,7 +854,7 @@ message ContainerStatus {
message ContainerStatusResponse {
// Status of the container.
ContainerStatus status = 1;
// Info is extra information of the Container. The key could be abitrary string, and
// Info is extra information of the Container. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. pid for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -993,7 +995,7 @@ message ImageStatusRequest {
message ImageStatusResponse {
// Status of the image.
Image image = 1;
// Info is extra information of the Image. The key could be abitrary string, and
// Info is extra information of the Image. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful
// for debug, e.g. image config for oci image based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -1088,7 +1090,7 @@ message StatusRequest {
message StatusResponse {
// Status of the Runtime.
RuntimeStatus status = 1;
// Info is extra information of the Runtime. The key could be abitrary string, and
// Info is extra information of the Runtime. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. plugins used by the container runtime.
// It should only be returned non-empty when Verbose is true.

View File

@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
const (
// When kubelet is started with the "external" cloud provider, then
// it sets this annotation on the node to denote an ip address set from the
// cmd line flag. This ip is verified with the cloudprovider as valid by
// the cloud-controller-manager
AnnotationProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip"
)

View File

@ -1,33 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
const (
LabelHostname = "kubernetes.io/hostname"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelMultiZoneDelimiter = "__"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelOS = "beta.kubernetes.io/os"
LabelArch = "beta.kubernetes.io/arch"
)
// When the --failure-domains scheduler flag is not specified,
// DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion

View File

@ -19,6 +19,8 @@ package container
import (
"fmt"
"time"
"github.com/golang/glog"
)
// Specifies a policy for garbage collecting containers.
@ -80,5 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error {
}
func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
glog.Infof("attempting to delete unused containers")
return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}

View File

@ -26,9 +26,13 @@ import (
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/util/conntrack"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/utils/exec"
)
// HostPortManager is an interface for adding and removing hostport for a given pod sandbox.
@ -44,18 +48,26 @@ type HostPortManager interface {
}
type hostportManager struct {
hostPortMap map[hostport]closeable
iptables utiliptables.Interface
portOpener hostportOpener
mu sync.Mutex
hostPortMap map[hostport]closeable
execer exec.Interface
conntrackFound bool
iptables utiliptables.Interface
portOpener hostportOpener
mu sync.Mutex
}
func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
return &hostportManager{
h := &hostportManager{
hostPortMap: make(map[hostport]closeable),
execer: exec.New(),
iptables: iptables,
portOpener: openLocalPort,
}
h.conntrackFound = conntrack.Exists(h.execer)
if !h.conntrackFound {
glog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
}
return h
}
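A minimal caller sketch (ipt and mapping are assumed to be built elsewhere; nothing below is added by this diff): with this change, UDP host ports get their stale conntrack entries cleared right after the iptables rules are written.

// Illustrative wiring of the hostport manager.
func ensureHostPorts(ipt utiliptables.Interface, sandboxID string, mapping *PodPortMapping) error {
	hm := NewHostportManager(ipt)             // probes for the conntrack binary and warns if missing
	return hm.Add(sandboxID, mapping, "cbr0") // "cbr0" is an illustrative NAT interface name
}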
func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) (err error) {
@ -103,10 +115,14 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
}
newChains := []utiliptables.Chain{}
conntrackPortsToRemove := []int{}
for _, pm := range hostportMappings {
protocol := strings.ToLower(string(pm.Protocol))
chain := getHostportChain(id, pm)
newChains = append(newChains, chain)
if pm.Protocol == v1.ProtocolUDP {
conntrackPortsToRemove = append(conntrackPortsToRemove, int(pm.HostPort))
}
// Add new hostport chain
writeLine(natChains, utiliptables.MakeChainLine(chain))
@ -150,6 +166,21 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
// clean up opened host port if encounter any error
return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)})
}
isIpv6 := utilnet.IsIPv6(podPortMapping.IP)
// Remove conntrack entries just after adding the new iptables rules. If the conntrack entry is removed along with
// the IP tables rule, it can be the case that the packets received by the node after iptables rule removal will
// create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct
// iptables rules have been added back.
if hm.execer != nil && hm.conntrackFound {
glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6)
for _, port := range conntrackPortsToRemove {
err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err)
}
}
}
return nil
}

View File

@ -45,7 +45,7 @@ const (
// options contains details about which streams are required for
// port forwarding.
// All fields incldued in V4Options need to be expressed explicitly in the
// All fields included in V4Options need to be expressed explicitly in the
// CRI (pkg/kubelet/apis/cri/{version}/api.proto) PortForwardRequest.
type V4Options struct {
Ports []int32

View File

@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
const (
@ -139,15 +140,16 @@ func (sp SyncPodType) String() string {
}
}
// IsCriticalPod returns true if the pod bears the critical pod annotation
// key. Both the rescheduler and the kubelet use this key to make admission
// and scheduling decisions.
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if pod's priority is greater than
// or equal to SystemCriticalPriority. Both the rescheduler(deprecated in 1.10) and the kubelet use this function
// to make admission and scheduling decisions.
func IsCriticalPod(pod *v1.Pod) bool {
return IsCritical(pod.Namespace, pod.Annotations)
return IsCritical(pod.Namespace, pod.Annotations) || (pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(pod.Namespace, *pod.Spec.Priority))
}
// IsCritical returns true if parameters bear the critical pod annotation
// key. The DaemonSetController use this key directly to make scheduling decisions.
// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
func IsCritical(ns string, annotations map[string]string) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
@ -159,3 +161,15 @@ func IsCritical(ns string, annotations map[string]string) bool {
}
return false
}
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
func IsCriticalPodBasedOnPriority(ns string, priority int32) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
return false
}
if priority >= schedulerapi.SystemCriticalPriority {
return true
}
return false
}
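An illustrative check of the new priority-based path (the pod literal is hypothetical): a kube-system pod whose resolved priority is at least schedulerapi.SystemCriticalPriority is now treated as critical even without the annotation.

// Sketch only, not part of this diff.
func exampleIsCritical() bool {
	prio := int32(schedulerapi.SystemCriticalPriority)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "kube-proxy-x"},
		Spec:       v1.PodSpec{Priority: &prio},
	}
	return IsCriticalPod(pod) // true with this change
}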

View File

@ -26,12 +26,12 @@ const (
// SchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
SchedulerPort = 10251
// ControllerManagerPort is the default port for the controller manager status server.
// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// CloudControllerManagerPort is the default port for the cloud controller manager server.
InsecureKubeControllerManagerPort = 10252
// InsecureCloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
CloudControllerManagerPort = 10253
InsecureCloudControllerManagerPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet

314 vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go generated vendored Normal file
View File

@ -0,0 +1,314 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"net"
"reflect"
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseEndpointInfo contains base information that defines an endpoint.
// This could be used directly by proxier while processing endpoints,
// or can be used for constructing a more specific EndpointInfo struct
// defined by the proxier if needed.
type BaseEndpointInfo struct {
Endpoint string // TODO: should be an endpointString type
// IsLocal indicates whether the endpoint is running in same host as kube-proxy.
IsLocal bool
}
var _ Endpoint = &BaseEndpointInfo{}
// String is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) String() string {
return info.Endpoint
}
// GetIsLocal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) GetIsLocal() bool {
return info.IsLocal
}
// IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) IP() string {
return utilproxy.IPPart(info.Endpoint)
}
// Port returns just the Port part of the endpoint.
func (info *BaseEndpointInfo) Port() (int, error) {
return utilproxy.PortPart(info.Endpoint)
}
// Equal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) Equal(other Endpoint) bool {
return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()
}
func newBaseEndpointInfo(IP string, port int, isLocal bool) *BaseEndpointInfo {
return &BaseEndpointInfo{
Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)),
IsLocal: isLocal,
}
}
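A tiny illustrative sketch of what the constructor above yields:

func exampleEndpointInfo() {
	ep := newBaseEndpointInfo("10.180.0.20", 8080, true) // true: endpoint runs on this node
	_ = ep.String()     // "10.180.0.20:8080" (JoinHostPort, so IPv6 addresses get brackets)
	_ = ep.IP()         // "10.180.0.20"
	_ = ep.GetIsLocal() // true
}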
type makeEndpointFunc func(info *BaseEndpointInfo) Endpoint
// EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of
// Endpoints, keyed by their namespace and name.
type EndpointChangeTracker struct {
// lock protects items.
lock sync.Mutex
// hostname is the host where kube-proxy is running.
hostname string
// items maps a service to its endpointsChange.
items map[types.NamespacedName]*endpointsChange
// makeEndpointInfo allows proxier to inject customized information when processing endpoint.
makeEndpointInfo makeEndpointFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewEndpointChangeTracker initializes an EndpointChangeTracker
func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, isIPv6Mode *bool, recorder record.EventRecorder) *EndpointChangeTracker {
return &EndpointChangeTracker{
hostname: hostname,
items: make(map[types.NamespacedName]*endpointsChange),
makeEndpointInfo: makeEndpointInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates given service's endpoints change map based on the <previous, current> endpoints pair. It returns true
// if items changed, otherwise return false. Update can be used to add/update/delete items of EndpointsChangeMap. For example,
// Add item
// - pass <nil, endpoints> as the <previous, current> pair.
// Update item
// - pass <oldEndpoints, endpoints> as the <previous, current> pair.
// Delete item
// - pass <endpoints, nil> as the <previous, current> pair.
func (ect *EndpointChangeTracker) Update(previous, current *api.Endpoints) bool {
endpoints := current
if endpoints == nil {
endpoints = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if endpoints == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
ect.lock.Lock()
defer ect.lock.Unlock()
change, exists := ect.items[namespacedName]
if !exists {
change = &endpointsChange{}
change.previous = ect.endpointsToEndpointsMap(previous)
ect.items[namespacedName] = change
}
change.current = ect.endpointsToEndpointsMap(current)
// if change.previous equal to change.current, it means no change
if reflect.DeepEqual(change.previous, change.current) {
delete(ect.items, namespacedName)
}
return len(ect.items) > 0
}
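A small sketch of the <previous, current> convention documented above, as it would be driven from informer event handlers (the handler wiring itself is assumed):

// Illustrative only: how add/update/delete events feed the tracker.
func exampleTrackerCalls(ect *EndpointChangeTracker, old, cur *api.Endpoints) {
	_ = ect.Update(nil, cur) // OnAdd:    add item
	_ = ect.Update(old, cur) // OnUpdate: update item
	_ = ect.Update(cur, nil) // OnDelete: delete item
	// The boolean result reports whether uncommitted changes remain;
	// they are folded into the live map later via UpdateEndpointsMap.
}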
// endpointsChange contains all changes to endpoints that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying the changes.
type endpointsChange struct {
previous EndpointsMap
current EndpointsMap
}
// UpdateEndpointMapResult is the updated results after applying endpoints changes.
type UpdateEndpointMapResult struct {
// HCEndpointsLocalIPSize maps an endpoints name to the length of its local IPs.
HCEndpointsLocalIPSize map[types.NamespacedName]int
// StaleEndpoints identifies if an endpoints service pair is stale.
StaleEndpoints []ServiceEndpoint
// StaleServiceNames identifies if a service is stale.
StaleServiceNames []ServicePortName
}
// UpdateEndpointsMap updates endpointsMap base on the given changes.
func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracker) (result UpdateEndpointMapResult) {
result.StaleEndpoints = make([]ServiceEndpoint, 0)
result.StaleServiceNames = make([]ServicePortName, 0)
endpointsMap.apply(changes, &result.StaleEndpoints, &result.StaleServiceNames)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to endpointsMap.
result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int)
localIPs := GetLocalEndpointIPs(endpointsMap)
for nsn, ips := range localIPs {
result.HCEndpointsLocalIPSize[nsn] = len(ips)
}
return result
}
// EndpointsMap maps a service name to a list of all its Endpoints.
type EndpointsMap map[ServicePortName][]Endpoint
// endpointsToEndpointsMap translates single Endpoints object to EndpointsMap.
// This function is used for incremental updates of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoints) EndpointsMap {
if endpoints == nil {
return nil
}
endpointsMap := make(EndpointsMap)
// We need to build a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := ServicePortName{
NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
Port: port.Name,
}
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
// Filter out the incorrect IP version case.
// Any endpoint port that contains incorrect IP version will be ignored.
if ect.isIPv6Mode != nil && utilnet.IsIPv6String(addr.IP) != *ect.isIPv6Mode {
// Emit event on the corresponding service which had a different
// IP version than the endpoint.
utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Name, endpoints.Namespace, "")
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == ect.hostname
baseEndpointInfo := newBaseEndpointInfo(addr.IP, int(port.Port), isLocal)
if ect.makeEndpointInfo != nil {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], ect.makeEndpointInfo(baseEndpointInfo))
} else {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo)
}
}
if glog.V(3) {
newEPList := []string{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep.String())
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
return endpointsMap
}
// apply the changes to EndpointsMap and updates stale endpoints and service-endpoints pair. The `staleEndpoints` argument
// is passed in to store the stale udp endpoints and `staleServiceNames` argument is passed in to store the stale udp service.
// The changes map is cleared after applying them.
func (endpointsMap EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
if changes == nil {
return
}
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
endpointsMap.Unmerge(change.previous)
endpointsMap.Merge(change.current)
detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
}
changes.items = make(map[types.NamespacedName]*endpointsChange)
}
// Merge ensures that the current EndpointsMap contains all <service, endpoints> pairs from the EndpointsMap passed in.
func (em EndpointsMap) Merge(other EndpointsMap) {
for svcPortName := range other {
em[svcPortName] = other[svcPortName]
}
}
// Unmerge removes the <service, endpoints> pairs from the current EndpointsMap which are contained in the EndpointsMap passed in.
func (em EndpointsMap) Unmerge(other EndpointsMap) {
for svcPortName := range other {
delete(em, svcPortName)
}
}
// GetLocalEndpointIPs returns endpoints IPs if given endpoint is local - local means the endpoint is running in same host as kube-proxy.
func GetLocalEndpointIPs(endpointsMap EndpointsMap) map[types.NamespacedName]sets.String {
localIPs := make(map[types.NamespacedName]sets.String)
for svcPortName, epList := range endpointsMap {
for _, ep := range epList {
if ep.GetIsLocal() {
nsn := svcPortName.NamespacedName
if localIPs[nsn] == nil {
localIPs[nsn] = sets.NewString()
}
localIPs[nsn].Insert(ep.IP())
}
}
}
return localIPs
}
// detectStaleConnections modifies <staleEndpoints> and <staleServices> with detected stale connections. <staleServiceNames>
// is used to store stale udp service in order to clear udp conntrack later.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
for svcPortName, epList := range oldEndpointsMap {
for _, ep := range epList {
stale := true
for i := range newEndpointsMap[svcPortName] {
if newEndpointsMap[svcPortName][i].Equal(ep) {
stale = false
break
}
}
if stale {
glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
*staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName})
}
}
}
for svcPortName, epList := range newEndpointsMap {
// For udp service, if its backend changes from 0 to non-0, there may exist a conntrack entry that could blackhole traffic to the service.
if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
*staleServiceNames = append(*staleServiceNames, svcPortName)
}
}
}

File diff suppressed because it is too large

344 vendor/k8s.io/kubernetes/pkg/proxy/service.go generated vendored Normal file
View File

@ -0,0 +1,344 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"net"
"reflect"
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseServiceInfo contains base information that defines a service.
// This could be used directly by proxier while processing services,
// or can be used for constructing a more specific ServiceInfo struct
// defined by the proxier if needed.
type BaseServiceInfo struct {
ClusterIP net.IP
Port int
Protocol api.Protocol
NodePort int
LoadBalancerStatus api.LoadBalancerStatus
SessionAffinityType api.ServiceAffinity
StickyMaxAgeSeconds int
ExternalIPs []string
LoadBalancerSourceRanges []string
HealthCheckNodePort int
OnlyNodeLocalEndpoints bool
}
var _ ServicePort = &BaseServiceInfo{}
// String is part of ServicePort interface.
func (info *BaseServiceInfo) String() string {
return fmt.Sprintf("%s:%d/%s", info.ClusterIP, info.Port, info.Protocol)
}
// ClusterIPString is part of ServicePort interface.
func (info *BaseServiceInfo) ClusterIPString() string {
return info.ClusterIP.String()
}
// GetProtocol is part of ServicePort interface.
func (info *BaseServiceInfo) GetProtocol() api.Protocol {
return info.Protocol
}
// GetHealthCheckNodePort is part of ServicePort interface.
func (info *BaseServiceInfo) GetHealthCheckNodePort() int {
return info.HealthCheckNodePort
}
func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, service *api.Service) *BaseServiceInfo {
onlyNodeLocalEndpoints := false
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &BaseServiceInfo{
ClusterIP: net.ParseIP(service.Spec.ClusterIP),
Port: int(port.Port),
Protocol: port.Protocol,
NodePort: int(port.NodePort),
// Deep-copy in case the service instance changes
LoadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
SessionAffinityType: service.Spec.SessionAffinity,
StickyMaxAgeSeconds: stickyMaxAgeSeconds,
OnlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
}
if sct.isIPv6Mode == nil {
info.ExternalIPs = make([]string, len(service.Spec.ExternalIPs))
info.LoadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
copy(info.LoadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
copy(info.ExternalIPs, service.Spec.ExternalIPs)
} else {
// Filter out the incorrect IP version case.
// If ExternalIPs and LoadBalancerSourceRanges on service contains incorrect IP versions,
// only filter out the incorrect ones.
var incorrectIPs []string
info.ExternalIPs, incorrectIPs = utilnet.FilterIncorrectIPVersion(service.Spec.ExternalIPs, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
info.LoadBalancerSourceRanges, incorrectIPs = utilnet.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
}
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name)
} else {
info.HealthCheckNodePort = int(p)
}
}
return info
}
type makeServicePortFunc func(*api.ServicePort, *api.Service, *BaseServiceInfo) ServicePort
// serviceChange contains all changes to services that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying all of the changes.
type serviceChange struct {
previous ServiceMap
current ServiceMap
}
// ServiceChangeTracker carries state about uncommitted changes to an arbitrary number of
// Services, keyed by their namespace and name.
type ServiceChangeTracker struct {
// lock protects items.
lock sync.Mutex
// items maps a service to its serviceChange.
items map[types.NamespacedName]*serviceChange
// makeServiceInfo allows proxier to inject customized information when processing service.
makeServiceInfo makeServicePortFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewServiceChangeTracker initializes a ServiceChangeTracker
func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, isIPv6Mode *bool, recorder record.EventRecorder) *ServiceChangeTracker {
return &ServiceChangeTracker{
items: make(map[types.NamespacedName]*serviceChange),
makeServiceInfo: makeServiceInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates given service's change map based on the <previous, current> service pair. It returns true if items changed,
// otherwise return false. Update can be used to add/update/delete items of ServiceChangeMap. For example,
// Add item
// - pass <nil, service> as the <previous, current> pair.
// Update item
// - pass <oldService, service> as the <previous, current> pair.
// Delete item
// - pass <service, nil> as the <previous, current> pair.
func (sct *ServiceChangeTracker) Update(previous, current *api.Service) bool {
svc := current
if svc == nil {
svc = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if svc == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
sct.lock.Lock()
defer sct.lock.Unlock()
change, exists := sct.items[namespacedName]
if !exists {
change = &serviceChange{}
change.previous = sct.serviceToServiceMap(previous)
sct.items[namespacedName] = change
}
change.current = sct.serviceToServiceMap(current)
// if change.previous equal to change.current, it means no change
if reflect.DeepEqual(change.previous, change.current) {
delete(sct.items, namespacedName)
}
return len(sct.items) > 0
}
// UpdateServiceMapResult is the updated results after applying service changes.
type UpdateServiceMapResult struct {
// HCServiceNodePorts is a map of Service names to node port numbers which indicate the health of that Service on this Node.
// The value(uint16) of HCServices map is the service health check node port.
HCServiceNodePorts map[types.NamespacedName]uint16
// UDPStaleClusterIP holds stale (no longer assigned to a Service) Service IPs that had UDP ports.
// Callers can use this to abort timeout-waits or clear connection-tracking information.
UDPStaleClusterIP sets.String
}
// UpdateServiceMap updates ServiceMap based on the given changes.
func UpdateServiceMap(serviceMap ServiceMap, changes *ServiceChangeTracker) (result UpdateServiceMapResult) {
result.UDPStaleClusterIP = sets.NewString()
serviceMap.apply(changes, result.UDPStaleClusterIP)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to serviceMap.
result.HCServiceNodePorts = make(map[types.NamespacedName]uint16)
for svcPortName, info := range serviceMap {
if info.GetHealthCheckNodePort() != 0 {
result.HCServiceNodePorts[svcPortName.NamespacedName] = uint16(info.GetHealthCheckNodePort())
}
}
return result
}
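A short sync-loop sketch (illustrative; the real proxier does considerably more): accumulated service changes are applied to the live ServiceMap, and the returned stale UDP cluster IPs are what the proxier subsequently clears from conntrack.

func exampleServiceSync(serviceMap ServiceMap, sct *ServiceChangeTracker) {
	result := UpdateServiceMap(serviceMap, sct)
	for _, ip := range result.UDPStaleClusterIP.List() {
		_ = ip // a UDP service no longer owns this cluster IP; clear its conntrack entries here
	}
	_ = result.HCServiceNodePorts // healthcheck node ports, keyed by service name
}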
// ServiceMap maps a service to its ServicePort.
type ServiceMap map[ServicePortName]ServicePort
// serviceToServiceMap translates a single Service object to a ServiceMap.
//
// NOTE: service object should NOT be modified.
func (sct *ServiceChangeTracker) serviceToServiceMap(service *api.Service) ServiceMap {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if utilproxy.ShouldSkipService(svcName, service) {
return nil
}
if len(service.Spec.ClusterIP) != 0 {
// Filter out the incorrect IP version case.
// If the ClusterIP on the service has the incorrect IP version, the service itself will be ignored.
if sct.isIPv6Mode != nil && utilnet.IsIPv6String(service.Spec.ClusterIP) != *sct.isIPv6Mode {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "clusterIP", service.Spec.ClusterIP, service.Namespace, service.Name, service.UID)
return nil
}
}
serviceMap := make(ServiceMap)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
baseSvcInfo := sct.newBaseServiceInfo(servicePort, service)
if sct.makeServiceInfo != nil {
serviceMap[svcPortName] = sct.makeServiceInfo(servicePort, service, baseSvcInfo)
} else {
serviceMap[svcPortName] = baseSvcInfo
}
}
return serviceMap
}
// apply the changes to ServiceMap and update the stale udp cluster IP set. The UDPStaleClusterIP argument is passed in to store the
// udp protocol service cluster ip when service is deleted from the ServiceMap.
func (serviceMap *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP sets.String) {
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
serviceMap.merge(change.current)
// filter out the Update events of current changes from previous changes before calling unmerge() so that we can
// skip deleting the Update events.
change.previous.filter(change.current)
serviceMap.unmerge(change.previous, UDPStaleClusterIP)
}
// clear changes after applying them to ServiceMap.
changes.items = make(map[types.NamespacedName]*serviceChange)
return
}
// merge adds the other ServiceMap's elements to the current ServiceMap.
// On collision, other ALWAYS wins; otherwise the other's element is added to current.
// In other words, if some elements in current collide with other, current is updated from other.
// It returns a string type set which stores all the newly merged services' identifier, ServicePortName.String(), to help users
// tell if a service is deleted or updated.
// The returned value is one of the arguments of ServiceMap.unmerge().
// Merging ServiceMap B into ServiceMap A does the following 2 things:
// * update ServiceMap A.
// * produce a string set which stores all other ServiceMap's ServicePortName.String().
// For example,
// - A{}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
// - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
func (sm *ServiceMap) merge(other ServiceMap) sets.String {
// existingPorts is going to store all identifiers of all services in `other` ServiceMap.
existingPorts := sets.NewString()
for svcPortName, info := range other {
// Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts.
existingPorts.Insert(svcPortName.String())
_, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
} else {
glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
}
(*sm)[svcPortName] = info
}
return existingPorts
}
// filter filters out elements from the ServiceMap based on the ports present in the given ServiceMap.
func (sm *ServiceMap) filter(other ServiceMap) {
for svcPortName := range *sm {
// skip the delete for Update event.
if _, ok := other[svcPortName]; ok {
delete(*sm, svcPortName)
}
}
}
// unmerge deletes all of the other ServiceMap's elements from the current ServiceMap. We pass in the UDPStaleClusterIP string set
// for storing the stale UDP service cluster IPs. Stale UDP connections are cleared based on UDPStaleClusterIP later.
func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) {
for svcPortName := range other {
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.GetProtocol() == api.ProtocolUDP {
UDPStaleClusterIP.Insert(info.ClusterIPString())
}
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q doesn't exists", svcPortName)
}
}
}
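// Editor's sketch (not part of the upstream file): the merge -> filter -> unmerge
// sequence used by apply(), replayed on plain maps. Keys stand in for
// ServicePortName.String(); values are opaque. Self-contained, no imports needed.
func exampleApplySequence() map[string]string {
	serviceMap := map[string]string{"ns/svc:http": "old", "ns/svc:metrics": "old"}
	previous := map[string]string{"ns/svc:http": "old", "ns/svc:metrics": "old"}
	current := map[string]string{"ns/svc:http": "new"} // "http" updated, "metrics" removed
	for k, v := range current { // merge: other always wins
		serviceMap[k] = v
	}
	for k := range previous { // filter: drop ports that were merely updated
		if _, ok := current[k]; ok {
			delete(previous, k)
		}
	}
	for k := range previous { // unmerge: remove ports that are truly gone
		delete(serviceMap, k)
	}
	return serviceMap // map[ns/svc:http:new]
}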

View File

@ -20,11 +20,12 @@ import (
"fmt"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ProxyProvider is the interface provided by proxier implementations.
type ProxyProvider interface {
// Sync immediately synchronizes the ProxyProvider's current state to iptables.
// Sync immediately synchronizes the ProxyProvider's current state to proxy rules.
Sync()
// SyncLoop runs periodic work.
// This is expected to run as a goroutine or as the main loop of the app.
@ -33,7 +34,7 @@ type ProxyProvider interface {
}
// ServicePortName carries a namespace + name + portname. This is the unique
// identfier for a load-balanced service.
// identifier for a load-balanced service.
type ServicePortName struct {
types.NamespacedName
Port string
@ -42,3 +43,37 @@ type ServicePortName struct {
func (spn ServicePortName) String() string {
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
}
// ServicePort is an interface which abstracts information about a service.
type ServicePort interface {
// String returns service string. An example format can be: `IP:Port/Protocol`.
String() string
// ClusterIPString returns service cluster IP in string format.
ClusterIPString() string
// GetProtocol returns service protocol.
GetProtocol() api.Protocol
// GetHealthCheckNodePort returns service health check node port if present. If return 0, it means not present.
GetHealthCheckNodePort() int
}
// Endpoint is an interface which abstracts information about an endpoint.
// TODO: Rename functions to be consistent with ServicePort.
type Endpoint interface {
// String returns endpoint string. An example format can be: `IP:Port`.
// We take the returned value as ServiceEndpoint.Endpoint.
String() string
// GetIsLocal returns true if the endpoint is running on the same host as kube-proxy, otherwise returns false.
GetIsLocal() bool
// IP returns IP part of the endpoint.
IP() string
// Port returns the Port part of the endpoint.
Port() (int, error)
// Equal checks if two endpoints are equal.
Equal(Endpoint) bool
}
// ServiceEndpoint is used to identify a service and one of its endpoint pair.
type ServiceEndpoint struct {
Endpoint string
ServicePortName ServicePortName
}
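// Editor's sketch (not part of the upstream file): a throwaway type showing how
// an "IP:Port" endpoint string maps onto the accessors above. It mirrors only
// String/GetIsLocal/IP/Port (Equal is omitted) and assumes `net` and `strconv`
// are imported.
type exampleEndpoint struct {
	addr    string // "IP:Port"
	isLocal bool
}

func (e exampleEndpoint) String() string   { return e.addr }
func (e exampleEndpoint) GetIsLocal() bool { return e.isLocal }
func (e exampleEndpoint) IP() string {
	ip, _, err := net.SplitHostPort(e.addr)
	if err != nil {
		return ""
	}
	return ip
}
func (e exampleEndpoint) Port() (int, error) {
	_, port, err := net.SplitHostPort(e.addr)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(port)
}

// exampleEndpoint{addr: "10.0.0.1:8080", isLocal: true} yields
// IP() == "10.0.0.1" and Port() == (8080, nil).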

View File

@ -47,6 +47,7 @@ func IPPart(s string) string {
return ""
}
// PortPart returns just the port part of an endpoint string.
func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)

45
vendor/k8s.io/kubernetes/pkg/proxy/util/network.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
)
// NetworkInterfacer defines an interface for several net library functions. Production
// code will forward to net library functions, and unit tests will override the methods
// for testing purposes.
type NetworkInterfacer interface {
Addrs(intf *net.Interface) ([]net.Addr, error)
Interfaces() ([]net.Interface, error)
}
// RealNetwork implements the NetworkInterfacer interface for production code, just
// wrapping the underlying net library function calls.
type RealNetwork struct{}
// Addrs wraps net.Interface.Addrs(), it's a part of NetworkInterfacer interface.
func (_ RealNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
return intf.Addrs()
}
// Interfaces wraps net.Interfaces(), it's a part of NetworkInterfacer interface.
func (_ RealNetwork) Interfaces() ([]net.Interface, error) {
return net.Interfaces()
}
var _ NetworkInterfacer = &RealNetwork{}
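// Editor's sketch (not part of the upstream file): a canned fake satisfying
// NetworkInterfacer so address-selection logic can be unit-tested without
// touching the host's real NICs.
type fakeNetwork struct {
	itfs  []net.Interface
	addrs map[string][]net.Addr // keyed by interface name
}

func (f fakeNetwork) Interfaces() ([]net.Interface, error) { return f.itfs, nil }
func (f fakeNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
	return f.addrs[intf.Name], nil
}

var _ NetworkInterfacer = fakeNetwork{}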

View File

@ -17,15 +17,32 @@ limitations under the License.
package util
import (
"fmt"
"net"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
utilnet "k8s.io/kubernetes/pkg/util/net"
"github.com/golang/glog"
)
const (
IPv4ZeroCIDR = "0.0.0.0/0"
IPv6ZeroCIDR = "::/0"
)
func IsZeroCIDR(cidr string) bool {
if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR {
return true
}
return false
}
func IsLocalIP(ip string) (bool, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
@ -56,3 +73,76 @@ func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool
}
return false
}
// GetNodeAddresses returns all matched node IP addresses based on the given CIDR slice.
// Some callers, e.g. IPVS proxier, need concrete IPs, not ranges, which is why this exists.
// NetworkInterfacer is injected for testing purposes.
// We expect the CIDRs passed in to be already validated.
// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly.
// If multiple CIDRs are given, it will return the minimal IP set, e.g. given input `[1.2.0.0/16, 0.0.0.0/0]`, it will
// only return `0.0.0.0/0`.
// NOTE: GetNodeAddresses only accepts CIDRs, if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32.
func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) {
uniqueAddressList := sets.NewString()
if len(cidrs) == 0 {
uniqueAddressList.Insert(IPv4ZeroCIDR)
uniqueAddressList.Insert(IPv6ZeroCIDR)
return uniqueAddressList, nil
}
// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
uniqueAddressList.Insert(cidr)
}
}
// Second round of iteration to parse IPs based on cidr.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
continue
}
_, ipNet, _ := net.ParseCIDR(cidr)
itfs, err := nw.Interfaces()
if err != nil {
return nil, fmt.Errorf("error listing all interfaces from host, error: %v", err)
}
for _, itf := range itfs {
addrs, err := nw.Addrs(&itf)
if err != nil {
return nil, fmt.Errorf("error getting address from interface %s, error: %v", itf.Name, err)
}
for _, addr := range addrs {
if addr == nil {
continue
}
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("error parsing CIDR for interface %s, error: %v", itf.Name, err)
}
if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
}
}
}
return uniqueAddressList, nil
}
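// Editor's sketch (not part of the upstream file): expected results, assuming a
// fake NetworkInterfacer (such as the one sketched above) whose only interface
// "eth0" carries 1.2.3.4/24 and fe80::1/64.
//
//	GetNodeAddresses([]string{"1.2.0.0/16"}, fake)              // {"1.2.3.4"}
//	GetNodeAddresses([]string{"1.2.0.0/16", "0.0.0.0/0"}, fake) // {"0.0.0.0/0"}
//	GetNodeAddresses(nil, fake)                                 // {"0.0.0.0/0", "::/0"}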
// LogAndEmitIncorrectIPVersionEvent logs and emits incorrect IP version event.
func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) {
errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName)
glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
if recorder != nil {
recorder.Eventf(
&v1.ObjectReference{
Kind: "Service",
Name: svcName,
Namespace: svcNamespace,
UID: svcUID,
}, v1.EventTypeWarning, "KubeProxyIncorrectIPVersion", errMsg)
}
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors.
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Contains utility code for use by volume plugins.
package util // import "k8s.io/kubernetes/pkg/volume/util"
// +k8s:deepcopy-gen=package
// Package api contains scheduler API objects.
package api // import "k8s.io/kubernetes/pkg/scheduler/api"

55
vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// TODO: remove this, scheduler should not have its own scheme.
var Scheme = runtime.NewScheme()
// SchemeGroupVersion is group version used to register these objects
// TODO this should be in the "scheduler" group
var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
var (
// SchemeBuilder defines a SchemeBuilder object.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is used to add stored functions to scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func init() {
if err := addKnownTypes(Scheme); err != nil {
// Programmer error.
panic(err)
}
}
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&Policy{},
)
return nil
}

281
vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go generated vendored Normal file
View File

@ -0,0 +1,281 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
restclient "k8s.io/client-go/rest"
)
const (
// MaxUint defines the max unsigned int value.
MaxUint = ^uint(0)
// MaxInt defines the max signed int value.
MaxInt = int(MaxUint >> 1)
// MaxTotalPriority defines the max total priority value.
MaxTotalPriority = MaxInt
// MaxPriority defines the max priority value.
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxInt / MaxPriority
// HighestUserDefinablePriority is the highest priority for user defined priority classes. Priority values larger than 1 billion are reserved for Kubernetes system use.
HighestUserDefinablePriority = int32(1000000000)
// SystemCriticalPriority is the beginning of the range of priority values for critical system components.
SystemCriticalPriority = 2 * HighestUserDefinablePriority
// NOTE: In order to avoid conflict of names with user-defined priority classes, all the names must
// start with scheduling.SystemPriorityClassPrefix which is by default "system-".
SystemClusterCritical = "system-cluster-critical"
SystemNodeCritical = "system-node-critical"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy describes a struct of a policy resource in api.
type Policy struct {
metav1.TypeMeta
// Holds the information to configure the fit predicate functions.
// If unspecified, the default predicate functions will be applied.
// If empty list, all predicates (except the mandatory ones) will be
// bypassed.
Predicates []PredicatePolicy
// Holds the information to configure the priority functions.
// If unspecified, the default priority functions will be applied.
// If empty list, all priority functions will be bypassed.
Priorities []PriorityPolicy
// Holds the information to communicate with the extender(s)
ExtenderConfigs []ExtenderConfig
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
// corresponding to every RequiredDuringScheduling affinity rule.
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
HardPodAffinitySymmetricWeight int32
// When AlwaysCheckAllPredicates is set to true, scheduler checks all
// the configured predicates even after one or more of them fails.
// When the flag is set to false, scheduler skips checking the rest
// of the predicates after it finds one predicate that failed.
AlwaysCheckAllPredicates bool
}
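// Editor's sketch (not part of the upstream file): a minimal Policy value built
// from these types; the predicate and priority names are illustrative only.
var examplePolicy = Policy{
	Predicates: []PredicatePolicy{
		{Name: "PodFitsResources"},
		{Name: "NoDiskConflict"},
	},
	Priorities: []PriorityPolicy{
		{Name: "LeastRequestedPriority", Weight: 1},
	},
	HardPodAffinitySymmetricWeight: 1,
}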
// PredicatePolicy describes a struct of a predicate policy.
type PredicatePolicy struct {
// Identifier of the predicate policy
// For a custom predicate, the name can be user-defined
// For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate
Name string
// Holds the parameters to configure the given predicate
Argument *PredicateArgument
}
// PriorityPolicy describes a struct of a priority policy.
type PriorityPolicy struct {
// Identifier of the priority policy
// For a custom priority, the name can be user-defined
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
Name string
// The numeric multiplier for the node scores that the priority function generates
// The weight should be a positive integer
Weight int
// Holds the parameters to configure the given priority function
Argument *PriorityArgument
}
// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
// Only one of its members may be specified
type PredicateArgument struct {
// The predicate that provides affinity for pods belonging to a service
// It uses a label to identify nodes that belong to the same "group"
ServiceAffinity *ServiceAffinity
// The predicate that checks whether a particular node has a certain label
// defined or not, regardless of value
LabelsPresence *LabelsPresence
}
// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
// Only one of its members may be specified
type PriorityArgument struct {
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
// It uses a label to identify nodes that belong to the same "group"
ServiceAntiAffinity *ServiceAntiAffinity
// The priority function that checks whether a particular node has a certain label
// defined or not, regardless of value
LabelPreference *LabelPreference
}
// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type ServiceAffinity struct {
// The list of labels that identify node "groups"
// All of the labels should match for the node to be considered a fit for hosting the pod
Labels []string
}
// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type LabelsPresence struct {
// The list of labels that identify node "groups"
// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
Labels []string
// The boolean flag that indicates whether the labels should be present or absent from the node
Presence bool
}
// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
// Used to identify node "groups"
Label string
}
// LabelPreference holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
// Used to identify node "groups"
Label string
// This is a boolean flag
// If true, higher priority is given to nodes that have the label
// If false, higher priority is given to nodes that do not have the label
Presence bool
}
// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {
// Name is the extended resource name.
Name v1.ResourceName
// IgnoredByScheduler indicates whether kube-scheduler should ignore this
// resource when applying predicates.
IgnoredByScheduler bool
}
// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty,
// it is assumed that the extender chose not to provide that extension.
type ExtenderConfig struct {
// URLPrefix at which the extender is available
URLPrefix string
// Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
FilterVerb string
// Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
PrioritizeVerb string
// The numeric multiplier for the node scores that the prioritize call generates.
// The weight should be a positive integer
Weight int
// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
BindVerb string
// EnableHTTPS specifies whether https should be used to communicate with the extender
EnableHTTPS bool
// TLSConfig specifies the transport layer security config
TLSConfig *restclient.TLSClientConfig
// HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize
// timeout is ignored; k8s/other extenders' priorities are used to select the node.
HTTPTimeout time.Duration
// NodeCacheCapable specifies that the extender is capable of caching node information,
// so the scheduler should only send minimal information about the eligible nodes
// assuming that the extender already cached full details of all nodes in the cluster
NodeCacheCapable bool
// ManagedResources is a list of extended resources that are managed by
// this extender.
// - A pod will be sent to the extender on the Filter, Prioritize and Bind
// (if the extender is the binder) phases iff the pod requests at least
// one of the extended resources in this list. If empty or unspecified,
// all pods will be sent to this extender.
// - If IgnoredByScheduler is set to true for a resource, kube-scheduler
// will skip checking the resource in predicates.
// +optional
ManagedResources []ExtenderManagedResource
}
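// Editor's sketch (not part of the upstream file): an extender that filters and
// prioritizes over plain HTTP but does not bind; the URL and verbs are
// illustrative, and `time` is already imported in this file.
var exampleExtender = ExtenderConfig{
	URLPrefix:      "http://127.0.0.1:12346/scheduler",
	FilterVerb:     "filter",
	PrioritizeVerb: "prioritize",
	Weight:         1,
	HTTPTimeout:    30 * time.Second,
}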
// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
// nodes for a pod.
type ExtenderArgs struct {
// Pod being scheduled
Pod v1.Pod
// List of candidate nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == false
Nodes *v1.NodeList
// List of candidate node names where the pod can be scheduled; to be
// populated only if ExtenderConfig.NodeCacheCapable == true
NodeNames *[]string
}
// FailedNodesMap represents the filtered out nodes, with node names and failure messages
type FailedNodesMap map[string]string
// ExtenderFilterResult represents the results of a filter call to an extender
type ExtenderFilterResult struct {
// Filtered set of nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == false
Nodes *v1.NodeList
// Filtered set of nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == true
NodeNames *[]string
// Filtered out nodes where the pod can't be scheduled and the failure messages
FailedNodes FailedNodesMap
// Error message indicating failure
Error string
}
// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
type ExtenderBindingArgs struct {
// PodName is the name of the pod being bound
PodName string
// PodNamespace is the namespace of the pod being bound
PodNamespace string
// PodUID is the UID of the pod being bound
PodUID types.UID
// Node selected by the scheduler
Node string
}
// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
type ExtenderBindingResult struct {
// Error message indicating failure
Error string
}
// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
type HostPriority struct {
// Name of the host
Host string
// Score associated with the host
Score int
}
// HostPriorityList declares a []HostPriority type.
type HostPriorityList []HostPriority
// SystemPriorityClasses defines special priority classes which are used by system critical pods that should not be preempted by workload pods.
var SystemPriorityClasses = map[string]int32{
SystemClusterCritical: SystemCriticalPriority,
SystemNodeCritical: SystemCriticalPriority + 1000,
}
func (h HostPriorityList) Len() int {
return len(h)
}
func (h HostPriorityList) Less(i, j int) bool {
if h[i].Score == h[j].Score {
return h[i].Host < h[j].Host
}
return h[i].Score < h[j].Score
}
func (h HostPriorityList) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}

View File

@ -0,0 +1,485 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
*out = *in
in.Pod.DeepCopyInto(&out.Pod)
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
if *in == nil {
*out = nil
} else {
*out = new(v1.NodeList)
(*in).DeepCopyInto(*out)
}
}
if in.NodeNames != nil {
in, out := &in.NodeNames, &out.NodeNames
if *in == nil {
*out = nil
} else {
*out = new([]string)
if **in != nil {
in, out := *in, *out
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs.
func (in *ExtenderArgs) DeepCopy() *ExtenderArgs {
if in == nil {
return nil
}
out := new(ExtenderArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs.
func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs {
if in == nil {
return nil
}
out := new(ExtenderBindingArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult.
func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult {
if in == nil {
return nil
}
out := new(ExtenderBindingResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
*out = *in
if in.TLSConfig != nil {
in, out := &in.TLSConfig, &out.TLSConfig
if *in == nil {
*out = nil
} else {
*out = new(rest.TLSClientConfig)
(*in).DeepCopyInto(*out)
}
}
if in.ManagedResources != nil {
in, out := &in.ManagedResources, &out.ManagedResources
*out = make([]ExtenderManagedResource, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig.
func (in *ExtenderConfig) DeepCopy() *ExtenderConfig {
if in == nil {
return nil
}
out := new(ExtenderConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) {
*out = *in
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
if *in == nil {
*out = nil
} else {
*out = new(v1.NodeList)
(*in).DeepCopyInto(*out)
}
}
if in.NodeNames != nil {
in, out := &in.NodeNames, &out.NodeNames
if *in == nil {
*out = nil
} else {
*out = new([]string)
if **in != nil {
in, out := *in, *out
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
if in.FailedNodes != nil {
in, out := &in.FailedNodes, &out.FailedNodes
*out = make(FailedNodesMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult.
func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult {
if in == nil {
return nil
}
out := new(ExtenderFilterResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource.
func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
if in == nil {
return nil
}
out := new(ExtenderManagedResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) {
{
in := &in
*out = make(FailedNodesMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap.
func (in FailedNodesMap) DeepCopy() FailedNodesMap {
if in == nil {
return nil
}
out := new(FailedNodesMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPriority) DeepCopyInto(out *HostPriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority.
func (in *HostPriority) DeepCopy() *HostPriority {
if in == nil {
return nil
}
out := new(HostPriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) {
{
in := &in
*out = make(HostPriorityList, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList.
func (in HostPriorityList) DeepCopy() HostPriorityList {
if in == nil {
return nil
}
out := new(HostPriorityList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelPreference) DeepCopyInto(out *LabelPreference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference.
func (in *LabelPreference) DeepCopy() *LabelPreference {
if in == nil {
return nil
}
out := new(LabelPreference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence.
func (in *LabelsPresence) DeepCopy() *LabelsPresence {
if in == nil {
return nil
}
out := new(LabelsPresence)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Predicates != nil {
in, out := &in.Predicates, &out.Predicates
*out = make([]PredicatePolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Priorities != nil {
in, out := &in.Priorities, &out.Priorities
*out = make([]PriorityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtenderConfigs != nil {
in, out := &in.ExtenderConfigs, &out.ExtenderConfigs
*out = make([]ExtenderConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
if in == nil {
return nil
}
out := new(Policy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Policy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
*out = *in
if in.ServiceAffinity != nil {
in, out := &in.ServiceAffinity, &out.ServiceAffinity
if *in == nil {
*out = nil
} else {
*out = new(ServiceAffinity)
(*in).DeepCopyInto(*out)
}
}
if in.LabelsPresence != nil {
in, out := &in.LabelsPresence, &out.LabelsPresence
if *in == nil {
*out = nil
} else {
*out = new(LabelsPresence)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument.
func (in *PredicateArgument) DeepCopy() *PredicateArgument {
if in == nil {
return nil
}
out := new(PredicateArgument)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) {
*out = *in
if in.Argument != nil {
in, out := &in.Argument, &out.Argument
if *in == nil {
*out = nil
} else {
*out = new(PredicateArgument)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy.
func (in *PredicatePolicy) DeepCopy() *PredicatePolicy {
if in == nil {
return nil
}
out := new(PredicatePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
*out = *in
if in.ServiceAntiAffinity != nil {
in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
if *in == nil {
*out = nil
} else {
*out = new(ServiceAntiAffinity)
**out = **in
}
}
if in.LabelPreference != nil {
in, out := &in.LabelPreference, &out.LabelPreference
if *in == nil {
*out = nil
} else {
*out = new(LabelPreference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument.
func (in *PriorityArgument) DeepCopy() *PriorityArgument {
if in == nil {
return nil
}
out := new(PriorityArgument)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) {
*out = *in
if in.Argument != nil {
in, out := &in.Argument, &out.Argument
if *in == nil {
*out = nil
} else {
*out = new(PriorityArgument)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy.
func (in *PriorityPolicy) DeepCopy() *PriorityPolicy {
if in == nil {
return nil
}
out := new(PriorityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity.
func (in *ServiceAffinity) DeepCopy() *ServiceAffinity {
if in == nil {
return nil
}
out := new(ServiceAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity.
func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity {
if in == nil {
return nil
}
out := new(ServiceAntiAffinity)
in.DeepCopyInto(out)
return out
}

186
vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go generated vendored Normal file
View File

@ -0,0 +1,186 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serviceaccount
import (
"errors"
"fmt"
"time"
"github.com/golang/glog"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/apis/core"
"gopkg.in/square/go-jose.v2/jwt"
)
// time.Now stubbed out to allow testing
var now = time.Now
type privateClaims struct {
Kubernetes kubernetes `json:"kubernetes.io,omitempty"`
}
type kubernetes struct {
Namespace string `json:"namespace,omitempty"`
Svcacct ref `json:"serviceaccount,omitempty"`
Pod *ref `json:"pod,omitempty"`
Secret *ref `json:"secret,omitempty"`
}
type ref struct {
Name string `json:"name,omitempty"`
UID string `json:"uid,omitempty"`
}
func Claims(sa core.ServiceAccount, pod *core.Pod, secret *core.Secret, expirationSeconds int64, audience []string) (*jwt.Claims, interface{}) {
now := now()
sc := &jwt.Claims{
Subject: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name),
Audience: jwt.Audience(audience),
IssuedAt: jwt.NewNumericDate(now),
NotBefore: jwt.NewNumericDate(now),
Expiry: jwt.NewNumericDate(now.Add(time.Duration(expirationSeconds) * time.Second)),
}
pc := &privateClaims{
Kubernetes: kubernetes{
Namespace: sa.Namespace,
Svcacct: ref{
Name: sa.Name,
UID: string(sa.UID),
},
},
}
switch {
case pod != nil:
pc.Kubernetes.Pod = &ref{
Name: pod.Name,
UID: string(pod.UID),
}
case secret != nil:
pc.Kubernetes.Secret = &ref{
Name: secret.Name,
UID: string(secret.UID),
}
}
return sc, pc
}
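// Editor's sketch (not part of the upstream file): roughly what the claims
// returned above serialize to for a pod-bound token once signed by the token
// generator; all concrete values are illustrative.
const examplePayload = `{
  "aud": ["https://kubernetes.default.svc"],
  "iat": 1520000000,
  "nbf": 1520000000,
  "exp": 1520003600,
  "sub": "system:serviceaccount:default:builder",
  "kubernetes.io": {
    "namespace": "default",
    "serviceaccount": {"name": "builder", "uid": "abc-123"},
    "pod": {"name": "build-1", "uid": "def-456"}
  }
}`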
func NewValidator(audiences []string, getter ServiceAccountTokenGetter) Validator {
return &validator{
auds: audiences,
getter: getter,
}
}
type validator struct {
auds []string
getter ServiceAccountTokenGetter
}
var _ = Validator(&validator{})
func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (string, string, string, error) {
private, ok := privateObj.(*privateClaims)
if !ok {
glog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj)
return "", "", "", errors.New("Token could not be validated.")
}
err := public.Validate(jwt.Expected{
Time: now(),
})
switch {
case err == nil:
case err == jwt.ErrExpired:
return "", "", "", errors.New("Token has expired.")
default:
glog.Errorf("unexpected validation error: %T", err)
return "", "", "", errors.New("Token could not be validated.")
}
var audValid bool
for _, aud := range v.auds {
audValid = public.Audience.Contains(aud)
if audValid {
break
}
}
if !audValid {
return "", "", "", errors.New("Token is invalid for this audience.")
}
namespace := private.Kubernetes.Namespace
saref := private.Kubernetes.Svcacct
podref := private.Kubernetes.Pod
secref := private.Kubernetes.Secret
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err)
return "", "", "", err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name)
return "", "", "", fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name)
}
if string(serviceAccount.UID) != saref.UID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID)
return "", "", "", fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID)
}
if secref != nil {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err)
return "", "", "", errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name)
return "", "", "", errors.New("Token has been invalidated")
}
if string(secret.UID) != secref.UID {
glog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID)
return "", "", "", fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID)
}
}
if podref != nil {
// Make sure token hasn't been invalidated by deletion of the pod
pod, err := v.getter.GetPod(namespace, podref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err)
return "", "", "", errors.New("Token has been invalidated")
}
if pod.DeletionTimestamp != nil {
glog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name)
return "", "", "", errors.New("Token has been invalidated")
}
if string(pod.UID) != podref.UID {
glog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID)
return "", "", "", fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID)
}
}
return private.Kubernetes.Namespace, private.Kubernetes.Svcacct.Name, private.Kubernetes.Svcacct.UID, nil
}
func (v *validator) NewPrivateClaims() interface{} {
return &privateClaims{}
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package serviceaccount
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
@ -30,10 +29,8 @@ import (
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/authenticator"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"github.com/golang/glog"
jose "gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
)
@ -41,6 +38,7 @@ import (
// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret
type ServiceAccountTokenGetter interface {
GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error)
GetPod(namespace, name string) (*v1.Pod, error)
GetSecret(namespace, name string) (*v1.Secret, error)
}
@ -113,14 +111,11 @@ func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims inte
// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator
// Token signatures are verified using each of the given public keys until one works (allowing key rotation)
// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter
func JWTTokenAuthenticator(iss string, keys []interface{}, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token {
func JWTTokenAuthenticator(iss string, keys []interface{}, validator Validator) authenticator.Token {
return &jwtTokenAuthenticator{
iss: iss,
keys: keys,
validator: &legacyValidator{
lookup: lookup,
getter: getter,
},
iss: iss,
keys: keys,
validator: validator,
}
}
@ -130,8 +125,19 @@ type jwtTokenAuthenticator struct {
validator Validator
}
// Validator is called by the JWT token authenticator to apply domain-specific
// validation to a token and extract user information.
type Validator interface {
Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error
// Validate validates a token and returns user information or an error.
// Validator can assume that the issuer and signature of a token are already
// verified when this function is called.
Validate(tokenData string, public *jwt.Claims, private interface{}) (namespace, name, uid string, err error)
// NewPrivateClaims returns a struct that the authenticator should
// deserialize the JWT payload into. The authenticator may then pass this
// struct back to the Validator as the 'private' argument to a Validate()
// call. This struct should contain fields for any private claims that the
// Validator requires to validate the JWT.
NewPrivateClaims() interface{}
}
var errMismatchedSigningMethod = errors.New("invalid signing method")
@ -147,7 +153,7 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info,
}
public := &jwt.Claims{}
private := &legacyPrivateClaims{}
private := j.validator.NewPrivateClaims()
var (
found bool
@ -168,12 +174,12 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info,
// If we get here, we have a token with a recognized signature and
// issuer string.
if err := j.validator.Validate(tokenData, public, private); err != nil {
ns, name, uid, err := j.validator.Validate(tokenData, public, private)
if err != nil {
return nil, false, err
}
return UserInfo(private.Namespace, private.ServiceAccountName, private.ServiceAccountUID), true, nil
return UserInfo(ns, name, uid), true, nil
}
// hasCorrectIssuer returns true if tokenData is a valid JWT in compact
@ -204,71 +210,3 @@ func (j *jwtTokenAuthenticator) hasCorrectIssuer(tokenData string) bool {
return true
}
type legacyValidator struct {
lookup bool
getter ServiceAccountTokenGetter
}
func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error {
// Make sure the claims we need exist
if len(public.Subject) == 0 {
return errors.New("sub claim is missing")
}
namespace := private.Namespace
if len(namespace) == 0 {
return errors.New("namespace claim is missing")
}
secretName := private.SecretName
if len(secretName) == 0 {
return errors.New("secretName claim is missing")
}
serviceAccountName := private.ServiceAccountName
if len(serviceAccountName) == 0 {
return errors.New("serviceAccountName claim is missing")
}
serviceAccountUID := private.ServiceAccountUID
if len(serviceAccountUID) == 0 {
return errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return errors.New("sub claim is invalid")
}
if v.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return nil
}

View File

@ -17,9 +17,14 @@ limitations under the License.
package serviceaccount
import (
"bytes"
"errors"
"fmt"
"k8s.io/api/core/v1"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"github.com/golang/glog"
"gopkg.in/square/go-jose.v2/jwt"
)
@ -42,3 +47,89 @@ type legacyPrivateClaims struct {
SecretName string `json:"kubernetes.io/serviceaccount/secret.name"`
Namespace string `json:"kubernetes.io/serviceaccount/namespace"`
}
func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter) Validator {
return &legacyValidator{
lookup: lookup,
getter: getter,
}
}
type legacyValidator struct {
lookup bool
getter ServiceAccountTokenGetter
}
var _ = Validator(&legacyValidator{})
func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (string, string, string, error) {
private, ok := privateObj.(*legacyPrivateClaims)
if !ok {
glog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj)
return "", "", "", errors.New("Token could not be validated.")
}
// Make sure the claims we need exist
if len(public.Subject) == 0 {
return "", "", "", errors.New("sub claim is missing")
}
namespace := private.Namespace
if len(namespace) == 0 {
return "", "", "", errors.New("namespace claim is missing")
}
secretName := private.SecretName
if len(secretName) == 0 {
return "", "", "", errors.New("secretName claim is missing")
}
serviceAccountName := private.ServiceAccountName
if len(serviceAccountName) == 0 {
return "", "", "", errors.New("serviceAccountName claim is missing")
}
serviceAccountUID := private.ServiceAccountUID
if len(serviceAccountUID) == 0 {
return "", "", "", errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return "", "", "", errors.New("sub claim is invalid")
}
if v.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return "", "", "", errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return "", "", "", errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return "", "", "", errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return "", "", "", err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return "", "", "", fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return "", "", "", fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return private.Namespace, private.ServiceAccountName, private.ServiceAccountUID, nil
}
func (v *legacyValidator) NewPrivateClaims() interface{} {
return &legacyPrivateClaims{}
}

View File

@ -14,28 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package conntrack
import (
"fmt"
"net"
"strconv"
"strings"
"k8s.io/api/core/v1"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/utils/exec"
)
// Utilities for dealing with conntrack
// NoConnectionToDelete is the error string returned by conntrack when no matching connections are found
const NoConnectionToDelete = "0 flow entries have been deleted"
func IsIPv6(netIP net.IP) bool {
return netIP != nil && netIP.To4() == nil
}
func IsIPv6String(ip string) bool {
netIP := net.ParseIP(ip)
return IsIPv6(netIP)
func protoStr(proto v1.Protocol) string {
return strings.ToLower(string(proto))
}
func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
@ -45,11 +42,11 @@ func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
return parameters
}
// ClearUDPConntrackForIP uses the conntrack tool to delete the conntrack entries
// ClearEntriesForIP uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the given service IP
func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
func ClearEntriesForIP(execer exec.Interface, ip string, protocol v1.Protocol) error {
parameters := parametersWithFamily(utilnet.IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", protoStr(protocol))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When failures occur, stale UDP connections may not get flushed.
// These stale UDP connections will black-hole traffic. Making this a best effort operation for now, since it
@ -59,8 +56,8 @@ func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
return nil
}
// ExecConntrackTool executes the conntrack tool using the given parameters
func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
// Exec executes the conntrack tool using the given parameters
func Exec(execer exec.Interface, parameters ...string) error {
conntrackPath, err := execer.LookPath("conntrack")
if err != nil {
return fmt.Errorf("error looking for path of conntrack: %v", err)
@ -72,29 +69,36 @@ func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
return nil
}
// ClearUDPConntrackForPort uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the port.
// Exists returns true if conntrack binary is installed.
func Exists(execer exec.Interface) bool {
_, err := execer.LookPath("conntrack")
return err == nil
}
// ClearEntriesForPort uses the conntrack tool to delete the conntrack entries
// for connections specified by the port.
// When a packet arrives, it will not go through NAT table again, because it is not "the first" packet.
// The solution is clearing the conntrack. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) error {
func ClearEntriesForPort(execer exec.Interface, port int, isIPv6 bool, protocol v1.Protocol) error {
if port <= 0 {
return fmt.Errorf("Wrong port number. The port number must be greater than zero")
}
parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
err := ExecConntrackTool(execer, parameters...)
parameters := parametersWithFamily(isIPv6, "-D", "-p", protoStr(protocol), "--dport", strconv.Itoa(port))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err)
}
return nil
}
// ClearUDPConntrackForPeers uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the {origin, dest} IP pair.
func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error {
parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
// ClearEntriesForNAT uses the conntrack tool to delete the conntrack entries
// for connections specified by the {origin, dest} IP pair.
func ClearEntriesForNAT(execer exec.Interface, origin, dest string, protocol v1.Protocol) error {
parameters := parametersWithFamily(utilnet.IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest,
"-p", protoStr(protocol))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
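
Taken together, the hunks above move these helpers out of the generic util package into a dedicated conntrack package and generalize them from UDP-only to any v1.Protocol, with the IPv6 checks now coming from k8s.io/kubernetes/pkg/util/net. A rough usage sketch under the new signatures follows; it assumes the package lands at k8s.io/kubernetes/pkg/util/conntrack, and the service IP, endpoint IP, node port, and call ordering are made up for illustration.

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/conntrack"
	"k8s.io/utils/exec"
)

func flushStaleEntries() error {
	execer := exec.New()
	// Only attempt anything if the conntrack binary is present.
	if !conntrack.Exists(execer) {
		return nil
	}
	// Drop entries destined for a (hypothetical) stale UDP service IP.
	if err := conntrack.ClearEntriesForIP(execer, "10.96.0.10", v1.ProtocolUDP); err != nil {
		return err
	}
	// Drop entries for a stale UDP node port.
	if err := conntrack.ClearEntriesForPort(execer, 30053, false /* isIPv6 */, v1.ProtocolUDP); err != nil {
		return err
	}
	// Drop entries NATed from the service IP to a removed endpoint.
	return conntrack.ClearEntriesForNAT(execer, "10.96.0.10", "10.244.1.5", v1.ProtocolUDP)
}

func main() { _ = flushStaleEntries() }

Gating on Exists keeps the whole sequence best-effort: as the Exec helper above shows, a missing conntrack binary would otherwise surface as a LookPath error on every call.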

View File

@ -21,7 +21,8 @@ import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"k8s.io/kubernetes/pkg/util/nsenter"
"github.com/golang/glog"
)
@ -54,25 +55,20 @@ type NsenterWriter struct{}
// WriteFile calls 'nsenter cat - > <the file>' and 'nsenter chmod' to create a
// file on the host.
func (writer *NsenterWriter) WriteFile(filename string, data []byte, perm os.FileMode) error {
cmd := "nsenter"
baseArgs := []string{
"--mount=/rootfs/proc/1/ns/mnt",
"--",
}
echoArgs := append(baseArgs, "sh", "-c", fmt.Sprintf("cat > %s", filename))
glog.V(5).Infof("Command to write data to file: %v %v", cmd, echoArgs)
command := exec.Command(cmd, echoArgs...)
command.Stdin = bytes.NewBuffer(data)
ne := nsenter.NewNsenter()
echoArgs := []string{"-c", fmt.Sprintf("cat > %s", filename)}
glog.V(5).Infof("nsenter: write data to file %s by nsenter", filename)
command := ne.Exec("sh", echoArgs)
command.SetStdin(bytes.NewBuffer(data))
outputBytes, err := command.CombinedOutput()
if err != nil {
glog.Errorf("Output from writing to %q: %v", filename, string(outputBytes))
return err
}
chmodArgs := append(baseArgs, "chmod", fmt.Sprintf("%o", perm), filename)
glog.V(5).Infof("Command to change permissions to file: %v %v", cmd, chmodArgs)
outputBytes, err = exec.Command(cmd, chmodArgs...).CombinedOutput()
chmodArgs := []string{fmt.Sprintf("%o", perm), filename}
glog.V(5).Infof("nsenter: change permissions of file %s to %s", filename, chmodArgs[0])
outputBytes, err = ne.Exec("chmod", chmodArgs).CombinedOutput()
if err != nil {
glog.Errorf("Output from chmod command: %v", string(outputBytes))
return err
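
The rewritten WriteFile delegates the namespace plumbing to the shared pkg/util/nsenter helper instead of spelling out "nsenter --mount=/rootfs/proc/1/ns/mnt --" by hand, so every caller enters the host mount namespace the same way. A hedged sketch of the same pattern applied to a different task (reading a host file) follows; only NewNsenter, Exec, and CombinedOutput are taken from the hunk, and the target path is made up.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/nsenter"
)

// readHostFile runs `cat <path>` inside the host mount namespace using the
// same helper the writer above switched to.
func readHostFile(path string) (string, error) {
	ne := nsenter.NewNsenter()
	out, err := ne.Exec("cat", []string{path}).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("reading %q via nsenter: %v", path, err)
	}
	return string(out), nil
}

func main() {
	if contents, err := readHostFile("/etc/hostname"); err == nil {
		fmt.Print(contents)
	}
}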

View File

@ -266,7 +266,7 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
if err != nil {
return true, err
}
rootStat, err := os.Lstat(file + "/..")
rootStat, err := os.Lstat(filepath.Dir(strings.TrimSuffix(file, "/")))
if err != nil {
return true, err
}
@ -432,7 +432,7 @@ func (mounter *Mounter) GetFileType(pathname string) (FileType, error) {
case syscall.S_IFBLK:
return FileTypeBlockDev, nil
case syscall.S_IFCHR:
return FileTypeBlockDev, nil
return FileTypeCharDev, nil
case syscall.S_IFDIR:
return FileTypeDirectory, nil
case syscall.S_IFREG:
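
This last hunk carries two small fixes: GetFileType now maps S_IFCHR to FileTypeCharDev instead of FileTypeBlockDev, and IsLikelyNotMountPoint computes the parent lexically with filepath.Dir rather than appending "/..", which would let Lstat resolve through a symlinked path into its target's parent. A tiny self-contained illustration of the parent expression, with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// parentOf mirrors the expression in the fixed IsLikelyNotMountPoint: trim a
// trailing slash first so filepath.Dir returns the containing directory
// rather than the path itself, without asking the filesystem to resolve "..".
func parentOf(file string) string {
	return filepath.Dir(strings.TrimSuffix(file, "/"))
}

func main() {
	fmt.Println(parentOf("/var/lib/kubelet/pods/vol"))  // /var/lib/kubelet/pods
	fmt.Println(parentOf("/var/lib/kubelet/pods/vol/")) // /var/lib/kubelet/pods
}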

Some files were not shown because too many files have changed in this diff.